Dataset schema (each record below lists these fields in this order, separated by "|"):

- blob_id: string (40 chars)
- directory_id: string (40 chars)
- path: string (2-616 chars)
- content_id: string (40 chars)
- detected_licenses: list (0-69 items)
- license_type: string (2 classes)
- repo_name: string (5-118 chars)
- snapshot_id: string (40 chars)
- revision_id: string (40 chars)
- branch_name: string (4-63 chars)
- visit_date: timestamp[us]
- revision_date: timestamp[us]
- committer_date: timestamp[us]
- github_id: int64 (2.91k-686M, nullable)
- star_events_count: int64 (0-209k)
- fork_events_count: int64 (0-110k)
- gha_license_id: string (23 classes)
- gha_event_created_at: timestamp[us]
- gha_created_at: timestamp[us]
- gha_language: string (213 classes)
- src_encoding: string (30 classes)
- language: string (1 class)
- is_vendor: bool (2 classes)
- is_generated: bool (2 classes)
- length_bytes: int64 (2-10.3M)
- extension: string (246 classes)
- content: string (2-10.3M chars)
- authors: list (1 item)
- author_id: string (0-212 chars)
0156816964cf6a379b4b2931c87865fc01cce196
|
d437914461b775a21ced89300d39893d1bc11c53
|
/apps/about/south_migrations/0002_dated_assoc.py
|
5e8aedfcbbcfbab81c3345c403cb64e7a9a06e34
|
[] |
no_license
|
RumorIO/healersource
|
86975107da02a18eac89bc65c72dd06f71ac2d72
|
681ef09e4044879840f7f0c8bccc836c3cffec3c
|
refs/heads/master
| 2020-12-03T02:18:56.378837
| 2016-02-19T15:32:52
| 2016-02-19T15:32:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,529
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from django.utils import timezone
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for item in orm['oauth_access.userassociation'].objects.all():
ua = orm.UserAssociationDated.objects.create(
user_assoc=item
)
ua.date_created = timezone.datetime(year=2010, month=8, day=15)
ua.save()
#raise RuntimeError("Cannot reverse this migration.")
def backwards(self, orm):
raise RuntimeError("Cannot reverse this migration.")
models = {
u'about.userassociationdated': {
'Meta': {'object_name': 'UserAssociationDated'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'datetime.datetime.now', 'blank': 'True'}),
'user_assoc': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oauth_access.UserAssociation']", 'unique': 'True', 'primary_key': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'oauth_access.userassociation': {
'Meta': {'unique_together': "[('user', 'service')]", 'object_name': 'UserAssociation'},
'expires': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identifier': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'service': ('django.db.models.fields.CharField', [], {'max_length': '75', 'db_index': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['about']
symmetrical = True
|
[
"yg1008@outlook.com"
] |
yg1008@outlook.com
|
f18576ca2bb7da9ebb8e21e6c0c553fd5a202277
|
b31444acce58c4024b440c63b33adc0c54997b5e
|
/Homework4.py
|
7add689c521c59422f576016897e381b6176feb0
|
[] |
no_license
|
Brajam/FE595-H4
|
cc29cc853475a4880ebbf9b420ebce376853b104
|
f16fa5ac8c4147e5aaf68a2395885a9a14befbe2
|
refs/heads/master
| 2020-04-30T16:40:37.023899
| 2019-03-21T13:58:45
| 2019-03-21T13:58:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 15,417
|
py
|
# FE 595: Python for Finance
# Homework 4
# Abraham Jimenez-Berlanga
# CWID 10444147
# Required Libraries
#Request to use GET REST API
from requests import get
#Regular Expressions for cleaning the data
import re as re
#for NLP textblob and different package within TextBlob
from textblob import TextBlob, Word, Blobber
from textblob.classifiers import NaiveBayesClassifier
from textblob.taggers import NLTKTagger
#Pandas for operate with Dataframes
import pandas as pd
#the Urls variable contains all the documents extracted from the discussion board
Urls = (
'https://sit.instructure.com/files/4653201/download?download_frd=1&verifier=aWl6ZvPwzQ6wPLZ8H6yTh7O1lJ4Dq9ryBWReSW3M',
'https://sit.instructure.com/files/4653202/download?download_frd=1&verifier=RiSLiI3W1QZnGN7Oq4dF3mjM0zE3TcnARy1jnHUr',
'https://sit.instructure.com/files/4653446/download?download_frd=1&verifier=SucCkdEmzGqm1k4vUxHAXkLtDHKy2ljscYOBps1q',
'https://sit.instructure.com/files/4653447/download?download_frd=1&verifier=bvAHTS2yRlc0MYTZzDOhKOXMppjbLJGX25WZuyRi',
'https://sit.instructure.com/files/4655884/download?download_frd=1&verifier=82Rk8fyWKUWLPoLeKnEkBVGYszgz1zMuqr1Yvxmv',
'https://sit.instructure.com/files/4655885/download?download_frd=1&verifier=vxxJURlsHhTNxzlceA2hsikGIx9HPA4Dd5ftVC68',
'https://sit.instructure.com/files/4660438/download?download_frd=1&verifier=oiifnM4whzkOWcCVeaCNmPkbWReL0HrkORoYJYlL',
'https://sit.instructure.com/files/4660440/download?download_frd=1&verifier=tifQvxM3nkdoRARw2KvMvLgexX1uujqGV17sEVZB',
'https://sit.instructure.com/files/4666549/download?download_frd=1&verifier=UiLyIz7iutN7ez6GqmzCUnjc53x4QrcCavc4sNpR',
'https://sit.instructure.com/files/4666550/download?download_frd=1&verifier=io9Ijw6wJoZEhmJdjZMu16qzpjk14EZWrvK2QHq9',
'https://sit.instructure.com/files/4667718/download?download_frd=1&verifier=DU8ObbY9paHuz4wW8YGKgovfhtNmQaQvHkJyIRhZ',
'https://sit.instructure.com/files/4667719/download?download_frd=1&verifier=SHKFDKg3xC2y9ab9BqQlqcUbnjolQAvvKPrTurqN',
'https://sit.instructure.com/files/4668745/download?download_frd=1&verifier=5PC9mvQsJzgZx9kK7Z4LslhPzReB8k5XbUS47n0C',
'https://sit.instructure.com/files/4668746/download?download_frd=1&verifier=DpD1L3FZ55oGVyi76k1qEljBI4h9OkSX2atSJQtY',
'https://sit.instructure.com/files/4669155/download?download_frd=1&verifier=eBCwLZIxM7d6qmByB0KWeoi3DQ3Xc90vc5iySeQ1',
'https://sit.instructure.com/files/4669156/download?download_frd=1&verifier=oet2oKbLFOhLuh34rc5H9ttoPLi17x67vOAavPhV',
'https://sit.instructure.com/files/4670532/download?download_frd=1&verifier=cJYQR7ihFvUrw6zwLv8f8iJkuAHxnIXfBDGHrxJO',
'https://sit.instructure.com/files/4670535/download?download_frd=1&verifier=Lo0mnKX2sgdhXdbIYenSfDbAs7eTXdKYgWnNp3rC',
'https://sit.instructure.com/files/4671622/download?download_frd=1&verifier=mNLpQ95JU0YC6aeDmXxMNm4A4I8OPy5nyCnoq9n3',
'https://sit.instructure.com/files/4671626/download?download_frd=1&verifier=J3MZSWBzl03ALzxnSckLLyOh7dppbmxnUfHYqFvn',
'https://sit.instructure.com/files/4672315/download?download_frd=1&verifier=Xv1E95CsSraWVB5EKkzMfQnwMHDBbYOzgrFi0t9B',
'https://sit.instructure.com/files/4672317/download?download_frd=1&verifier=WssW7pSdz9Jxe52T2NEaTN20Sf1qOenVomOgkW86',
'https://sit.instructure.com/files/4672527/download?download_frd=1&verifier=ln55QWKRZczHm5Bc2TbGjqLtaG4mixVuLiGHWZYD',
'https://sit.instructure.com/files/4672530/download?download_frd=1&verifier=VU4NlKLDDuTOiOrrnLLf47IRyOOGx8rD7GGZb1Bo',
'https://sit.instructure.com/files/4674975/download?download_frd=1&verifier=ZheCIHwc0UFwXjmJqRG65P9ZcUzFt8fm9Dqpqrwa',
'https://sit.instructure.com/files/4674976/download?download_frd=1&verifier=GKyV60Y1wi3oG8M5vK9yV53iFGBmPabPpD1Uq9Jz',
'https://sit.instructure.com/files/4675673/download?download_frd=1&verifier=JpPv9gSadHwkDmG9unJuLJQ1RHNPq8RAKyPF3fuL',
'https://sit.instructure.com/files/4675674/download?download_frd=1&verifier=doxOFRXHcnrJ8EsiKN8LQRTtkIczq9m0Jr33OG91',
'https://sit.instructure.com/files/4676819/download?download_frd=1&verifier=0AuOeXPVjUGWvBxhFuibtLx1ttUt6wU5vSEb6dBJ',
'https://sit.instructure.com/files/4676820/download?download_frd=1&verifier=s2T86S3ZxBWPIqKmVk7B1AcOb1rjzOpE0rGr3sI0',
'https://sit.instructure.com/files/4677946/download?download_frd=1&verifier=16cJvD3YVWaJ1FhkJIY2tZo3pyTAA2q1Z7WoMNke',
'https://sit.instructure.com/files/4677947/download?download_frd=1&verifier=Ka04t7lioeP1kmwKe1NdYCBaSl1pYCq7VkfBK7t1',
'https://sit.instructure.com/files/4678026/download?download_frd=1&verifier=GolTN9BYLKNWPmSfNXyF6QNQixjITKFqLTkqq79d',
'https://sit.instructure.com/files/4678030/download?download_frd=1&verifier=I0ulyJuf2Zg879NouvYXtMpYATItfz0UkxVGyq8v',
'https://sit.instructure.com/files/4691540/download?download_frd=1&verifier=tQBKXLAbC6xQgXXOZSzp52aHZ8ygCRLAEEMTk5hm',
'https://sit.instructure.com/files/4691541/download?download_frd=1&verifier=e2cygvJb2uMgwuJv0Ryu3LOatuRHY9I4sxKAySsD')
# First function, to get the files from the web/local directory and merge all of them
def getandmerge(Input:"List containing the file names or urls",Input_type:"Variable indicating if URLs or TXT file"):
# list that it will contain all the characters
full_list = []
# Address the correct option
# URL if the files has to be retrieved from a sharepoint or cloud drive
if Input_type=="URL":
# looping over the URL list to get all the files
for URL in Input:
file = get(URL)
text_i = file.text
full_list.append(text_i)
# TXT if the files are in a local directory
elif Input_type=="TXT":
# looping over the file list to read all the files
for file_i in Input:
with open(file_i,'r') as input_file:
text_i =input_file.read()
full_list.append(text_i)
# Save the list into a file
with open('mergedlist.txt', 'w') as merged_file:
merged_file.write("\n".join(full_list))
# the ouput file name is printed in case that the function is not been called to store the return in a variable
print('You can find all the characters in the file mergedlist.txt')
# File name is returned
return('mergedlist.txt')
getandmerge(Urls,"URL")
def cleanandevaluate(File_name:"Name of the file with the characters"):
# the Input for the function is the filename that contains the characters
# to be cleaned and processed
# We define the list that we'll use for creating our dataframe
# the dataframe will contain the character, if it is male or female
# and the sentiment score
final_list = []
gender_list = []
sentiment_list = []
# first, we open the file and we read line by line, to ensure that we clean and
# standarize the format
with open(File_name,'r') as merged_file:
for position,line in enumerate(merged_file):
# Some of the characters had an extra space at the beginning, so we remove it
# if there were more than 1 space, we should have replaced this simple if
# with a function that will identify how many spaces and remove them
if line[0] == " ":
line = line[1:len(line)]
# as happen with the space, there are some sentences that they have added a '
# at front, so we need to remove it
if line[0] == "'":
line = line[2:len(line)]
# due to the different formating when merging, some characters have an extrac /n
# at the end of the character
if bool(re.search("\n",line)):
line = line[0:len(line)-2]
# the next scenario to clean is to remove " They figth crime" I have choosen this approach
# because of the simplicity, if there were more than the scenarios below, I should have
# created a function that will found the "they" in the string, and remove all the characters
# after They. I choose this approach because of the simplicity
if bool(re.search(" They fight crime!",line)):
line = line[0:len(line)-len(" They fight crime!")]
if bool(re.search(" They fight crime",line)):
line = line[0:len(line)-len(" They fight crime")]
if bool(re.search(" They fight crim",line)):
line = line[0:len(line)-len(" They fight crim")]
if line[0:1]=='[':
# one of the files has been saved with the list format (with [] delimiting the list and separated by ,
# to clear this, once the record is identified -begins with [-, we convert it to a List
line = list(line)
for i in range(0, len(line)):
if bool(re.search("She's", line[i])):
gender_list.append("female")
final_list.append(line[i])
tb_line = TextBlob(line[i])
sentiment_list.append(tb_line.sentiment.polarity)
elif bool(re.search("He's", line[i])):
gender_list.append("male")
final_list.append(line[i])
tb_line = TextBlob(line[i])
sentiment_list.append(tb_line.sentiment.polarity)
# while reading the file, one of the issues found was that the 50 chactares were stored as one
# string, so we identify this scenario and additionally, moving forward, I'm going to identify
# if it is male/femail and calculate the score for the character.
elif bool(re.search("She's", line)) and len(line)>200 and (line[0] == "S" or line[1]=="S"):
iprev =0
for i in range(0, len(line)):
if i < (len(line) -5):
# although I could use the Regular Expressions for splitting the 50 characters
# saved as 1 string, i feel more confortable identifying in which position does "she's" begin
# and delimiting with it
if line[i:i+5] =="She's":
gender_list.append("female")
final_list.append(line[iprev:i])
# During the following steps, i will be using TextBlob, creating the tb_line object and using it
# to score the polarity
tb_line = TextBlob(line)
sentiment_list.append(tb_line.sentiment.polarity)
iprev = i
elif bool(re.search("He's", line)) and len(line) > 200:
iprev =0
for i in range(0, len(line)):
if i < (len(line) -4):
if line[i:i+4] =="He's":
gender_list.append("male")
final_list.append(line[iprev:i])
tb_line = TextBlob(line)
sentiment_list.append(tb_line.sentiment.polarity)
iprev =i
elif bool(re.search("She's", line)):
gender_list.append("female")
final_list.append(line)
tb_line = TextBlob(line)
sentiment_list.append(tb_line.sentiment.polarity)
elif bool(re.search("He's",line)):
gender_list.append("male")
final_list.append(line)
tb_line = TextBlob(line)
sentiment_list.append(tb_line.sentiment.polarity)
elif bool(re.search("she's", line)):
gender_list.append("female")
final_list.append(line)
tb_line = TextBlob(line)
sentiment_list.append(tb_line.sentiment.polarity)
elif bool(re.search("he's", line)):
gender_list.append("male")
final_list.append(line)
tb_line = TextBlob(line)
sentiment_list.append(tb_line.sentiment.polarity)
else:
# some of the characters were saved after removing the she's/he's leaving the characters un identified
# althought it could be possible in some of them to identify if it was a male/female the number of records
# identify vs processed will be really low
# Additionally, after reading, empty lines are created because of the break page indicator. this still will remove
# them from the final list
scenario = "third"
data_sentence = {'Sentence':final_list,'Gender':gender_list,'Sentiment':sentiment_list}
# creation of the dataframe that it will be used for sorting and filtering
dataframe_sentences = pd.DataFrame(data_sentence)
dataframe_sentences.sort_values(by='Sentiment',ascending=False)
female_score = dataframe_sentences[(dataframe_sentences.Gender == "female")]
female_score = female_score.sort_values(by='Sentiment',ascending=False)
female_score.index = list(range(1, len(female_score) + 1))
male_score = dataframe_sentences[(dataframe_sentences.Gender == "male")]
male_score = male_score.sort_values(by='Sentiment',ascending=False)
male_score.index = list(range(1, len(male_score) + 1))
# Results:
print("The worst female character is:",female_score.at[len(female_score),"Sentence"])
print("The best female character is:",female_score.at[1,"Sentence"])
print("The worst male character is:",male_score.at[len(male_score),"Sentence"])
print("The best male character is:",male_score.at[1,"Sentence"])
print(male_score.at[1,"Sentence"],female_score.at[1,"Sentence"],"They fight Crime!")
print(male_score.at[len(male_score),"Sentence"],female_score.at[len(female_score),"Sentence"],"They fight Crime!")
# Saving the list cleaned to be used in further analysis if needed
with open('cleanedlist.txt', 'w') as cleaned_file:
for line_final in final_list:
cleaned_file.write(line_final)
cleaned_file.write("\n")
# the ouput file name is printed in case that the function is not been called to store the return in a variable
print('The list of characters cleaned can be found in cleanedlist.txt')
# Name of the file with the final list as returned result
return('cleanedlist.txt')
cleanandevaluate('mergedlist.txt')
def topdescriptions(File_name:"Name of the file with the characters"):
# Function to identify most common descriptors
# As input we'll include the list of characters cleaned
with open(File_name,'r') as whole_file:
# Reading the whole file, as TexBlob will be applied to the whole
whole_text = whole_file.read()
tb_text = TextBlob(whole_text)
adj_list =[]
# Once the TextBlob is created, a loop is performed over the list of words
# to identify the descriptors (PoS like JJ are adjetives)
for word,pos in tb_text.tags:
if pos[0:2] =="JJ":
adj_list.append(word)
# Creating a Dataframe indicating how many times the adjetive is on the list
count_dataframe = pd.Series(adj_list).value_counts()
# Printing top ten
print("The 10 most common descriptions for characters:")
print(count_dataframe.head(n=10))
topdescriptions('cleanedlist.txt')
|
[
"47007305+Brajam@users.noreply.github.com"
] |
47007305+Brajam@users.noreply.github.com
|
757ee3c68220ba259470fa444f19044c109520d5
|
c772d432aae55b5ceebd4fa616f5f497dc2b756e
|
/AudioCollection.py
|
fe5fa8b0d19405b633bfba7200382ad3b278b80e
|
[] |
no_license
|
GabitovRuslan/Home_work_OOP
|
534471e3eab6228019479ed1ee866c4418eb5572
|
ad1f243baebfd82d2373c23e68ddf8bad40a1763
|
refs/heads/master
| 2023-01-19T09:29:13.760802
| 2020-11-26T20:50:13
| 2020-11-26T20:50:13
| 315,713,078
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,379
|
py
|
class Track:
info_track = []
def __init__(self, name_track, duration):
self.name_track = name_track
self.duration = duration
def show(self):
return f'{self.name_track} - {self.duration} мин.'
class Album:
def __init__(self, name_album, group):
self.name_album = name_album
self.group = group
self.track_list = []
def add_tracks(self, x):
self.track_list.append(x)
def get_tracks(self):
print(f'Название альбома: {self.name_album}\nИсполнитель: {self.group}')
for x in self.track_list:
print(x.show())
def get_duration(self):
max_time = sum(x.duration for x in self.track_list)
print(f'Общая длина всех треков {max_time}')
track1 = Track('фантазер', 3)
track2 = Track('"Фан"', 5)
track3 = Track('"Буум>"', 5)
track4 = Track('фантазер', 8)
track5 = Track('"Фан"', 7)
track6 = Track('"Буум>"', 6)
album_apple = Album('Яблоко', 'На-на')
album_one = Album('Один', 'ДДТ')
album_apple.add_tracks(track1)
album_apple.add_tracks(track2)
album_apple.add_tracks(track3)
album_one.add_tracks(track4)
album_one.add_tracks(track5)
album_one.add_tracks(track6)
album_apple.get_tracks()
album_apple.get_duration()
album_one.get_tracks()
album_one.get_duration()
|
[
"89655662408@bk.ru"
] |
89655662408@bk.ru
|
069e3fb662be70b8084a9964335c6530bed4f56c
|
cd9ee99d2966a7b796ef96dd013d28ad39b8a16d
|
/Flask/app.py
|
04ec6a9c371f4569f7b91d4beadb80edf9dc9c24
|
[] |
no_license
|
mato7927/RoCo
|
7e48e0a9e253f306b2ec6ccc6c0f929c16e3097e
|
076acf853a4dcf278f6885d35e85a04f7fdc8373
|
refs/heads/master
| 2022-12-19T15:47:50.384146
| 2020-08-17T17:11:26
| 2020-08-17T17:11:26
| 299,575,985
| 0
| 0
| null | 2020-09-29T10:01:39
| 2020-09-29T10:00:27
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 704
|
py
|
from flask import (Flask, render_template, request, session, redirect, url_for, g, flash)
from flask import Flask, render_template, request
# from chatbot import chatbot
from ChatBot import RoCo
chatbot = RoCo()
app = Flask(__name__)
app.secret_key = 'helloworld@tiemoko'
@app.route("/app")
def chat():
return render_template("app.html")
@app.route("/app")
def getBotResponse():
userText = request.args.get('msg')
print(userText)
return str(chatbot.get_RoCo_Response(userText))
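# Note: both view functions above are registered on the same URL rule ("/app"), so in practice
# Flask resolves that URL to only one of them; the bot-response view was presumably intended for
# a separate path (e.g. "/get", an assumption based on the commented-out POST handling below).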
# if request.method == 'POST':
# userText = request.form['msg']
# print(userText)
# return str(chatbot.get_RoCo_Response(userText))
if __name__ == "__main__":
app.run()
|
[
"tiems@mail.com"
] |
tiems@mail.com
|
fc9487662f3fdca2d286a364988c65b7eb5c9b73
|
4887a1a84e5ae0a3f4e23c41576b53e11e56840c
|
/main.py
|
5904e49fff56850ef1ba4f6626f0ca070fa7c113
|
[] |
no_license
|
liujwplayer/python
|
c39dfd9d76034e9f4f8dd053442d3cbf3b220020
|
5e270a06c6c0a13cbabb409cebd64fdc6b3150d2
|
refs/heads/master
| 2020-04-01T08:29:03.386841
| 2018-10-28T05:41:56
| 2018-10-28T05:41:56
| 153,032,626
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 769
|
py
|
#-*-coding:utf-8-*-
from driver import Driver
from car import Car
from stop import Stop
from parking_record import ParkingRecord
from order import Order
from stop_place import StopPlace
import datetime
import time
car1 = Car('辽A666','小型轿车','白色','车牌为蓝色','福特—福睿斯')
driver1 = Driver('9527','鸡小萌','1381111111',car1.car_logo)
parkingrecord1 = ParkingRecord('9527','辽A666')
stopplace1 = StopPlace('9527','辽A666')
order = Order('1','50','支付宝','未支付','鸡小萌','福特—福睿斯','',"未完成")
car1.car_message()
driver1.driver_car()
parkingrecord1.start()
stopplace1.place_stop_time()
driver1.driver_shopping()
time.sleep(5)
driver1.driver_shopped()
stopplace1.place_go_time()
parkingrecord1.end()
order.pay()
|
[
"liujwplayer@163.com"
] |
liujwplayer@163.com
|
448a6b8cbdfdcd2a285718950d9165950a94fe09
|
def5327c20c644ca6b2a56140b512ea6ac075994
|
/HW_5/Bodnar/Kata_5_3.py
|
a93a2eff8ff6b8e985d485a80e9b6b589ba4dd55
|
[] |
no_license
|
iamanobject/Lv-568.2.PythonCore
|
662cdfc5d91d80e83463b5f0beee53a6e63f4062
|
939d7035f602d0f035ea8b6526c36b7153d0bd53
|
refs/heads/main
| 2023-03-27T18:24:13.605364
| 2021-03-11T13:36:44
| 2021-03-11T13:36:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
def are_you_playing_banjo(name):
if name[0].lower() == "r":
return name + " plays banjo"
else:
return name + " does not play banjo"
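# Illustrative usage only (the names below are examples, not part of the original kata file):
if __name__ == "__main__":
    print(are_you_playing_banjo("Rikke"))  # -> "Rikke plays banjo"
    print(are_you_playing_banjo("Adam"))   # -> "Adam does not play banjo"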
|
[
"v.bodnar2013@gmail.com"
] |
v.bodnar2013@gmail.com
|
1c76d4428e22cdee21c529dee4e0ece436c0a434
|
123e1f24d81ad9c4b48cb189c7dca3e965d708f5
|
/my_code/loops5.py
|
8dc30f79906f789f868b33561fdf3bf76f228bdf
|
[] |
no_license
|
DouglasAllen/Python-projects
|
03b71dd29b9f6cb037fd1db30ded83c7897dbd99
|
a5d2da0c611973f34a1eb125926eb4ef9e23814e
|
refs/heads/master
| 2021-01-02T22:32:16.840133
| 2014-02-04T16:55:44
| 2014-02-04T16:55:44
| 12,250,138
| 0
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 86
|
py
|
num = 100
while not False:
if num < 0:
break
print('num is: ' + str(num))
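# As written, num is never modified inside the loop ("while not False" is an infinite loop and
# the break condition num < 0 never becomes true), so this prints "num is: 100" indefinitely;
# a decrement such as num -= 1 in the loop body would presumably let the break fire eventually.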
|
[
"kb9agt@gmail.com"
] |
kb9agt@gmail.com
|
6ef14f05662ab3563f7bda897991de76c49674fb
|
3235a66ef994e044252ee43aa522aff8c6d99c1c
|
/First Project/tution/migrations/0001_initial.py
|
4fb494de000c3c381c5b72a92c9d370f9e661798
|
[] |
no_license
|
MasumTech/Tuition-Media
|
a6e77e1ff8b5c0a1fe195ce4d0e8d489681de73c
|
e50bbc2eda2a0f979741420144967263a67a0ccc
|
refs/heads/master
| 2023-02-26T07:28:14.532322
| 2021-01-30T15:57:57
| 2021-01-30T15:57:57
| 332,982,827
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 597
|
py
|
# Generated by Django 3.1.5 on 2021-01-25 11:18
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Contact',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('phone', models.CharField(max_length=100)),
('message', models.TextField()),
],
),
]
|
[
"masumrezadiu@gmail.com"
] |
masumrezadiu@gmail.com
|
7bedcde6128086e2fe8237954e3e2c28bc8f00be
|
b619c3b5c5c33ea1d952818f808305f50e4b88d7
|
/data/management/commands/import_school_breakfast_participation.py
|
54510a66bbc29d6a365d9771ad2ad2714b6d0976
|
[] |
no_license
|
jroo/npp_api
|
457e7c9fcc68dd12ad092bc5a5717fcd0652172d
|
16164278321c6aeaeb2e9e5947c1e8f7394a8521
|
refs/heads/master
| 2021-01-01T17:27:25.131036
| 2011-04-29T15:07:35
| 2011-04-29T15:07:35
| 375,120
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,005
|
py
|
from django import db
from django.conf import settings
from django.core.management.base import NoArgsCommand
from data.models import SchoolBreakfastParticipation
import csv
# National Priorities Project Data Repository
# import_school_breakfast_participation.py
# Updated 7/27/2010, Joshua Ruihley, Sunlight Foundation
# Imports USDA State Level School Breakfast Participation Data
# source info: http://www.fns.usda.gov/pd/08sbfypart.htm (accurate as of 7/27/2010)
# npp csv: http://assets.nationalpriorities.org/raw_data/hunger/school_breakfast_participation.csv (updated 7/27/2010)
# destination model: SchoolBreakfastParticipation
# HOWTO:
# 1) Download source files from url listed above
# 2) Convert source file to .csv with same formatting as npp csv
# 3) change SOURCE_FILE variable to the the path of the source file you just created
# 4) change 'amount' column in data_SchoolBreakfastParticipation table to type 'bigint'
# 5) Run as Django management command from your project path "python manage.py import_school_breakfast_participation"
SOURCE_FILE = '%s/hunger/school_breakfast_participation.csv' % (settings.LOCAL_DATA_ROOT)
class Command(NoArgsCommand):
def handle_noargs(self, **options):
def clean_int(value):
if value.strip()=='':
value=None
else:
value=int(value)
return value
data_reader = csv.reader(open(SOURCE_FILE))
for i, row in enumerate(data_reader):
if i == 0:
year_row = row;
else:
state = row[0]
for j,col in enumerate(row):
if j > 0:
record = SchoolBreakfastParticipation()
record.year = int(year_row[j])
record.state = state
record.value = clean_int(col)
record.save()
db.reset_queries()
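# reset_queries() above clears Django's per-connection query log (which accumulates when
# DEBUG=True), keeping memory bounded during a long import loop.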
|
[
"ruihley@gmail.com"
] |
ruihley@gmail.com
|
ac614e6eb9825daee30e9c3e04946d44d3569ee9
|
68cf7c25bb614883c50d21e5051fbea8dbf18ccb
|
/ecommercejockey/premier/migrations/0009_premiermanufacturer_slug.py
|
12ef97d4ae6d8570eb42baba688e544222f772ac
|
[
"MIT"
] |
permissive
|
anniethiessen/ecommerce-jockey
|
63bf5af6212a46742dee98d816d0bc2cdb411708
|
9268b72553845a4650cdfe7c88b398db3cf92258
|
refs/heads/master
| 2022-12-14T02:29:25.140796
| 2021-05-15T01:20:30
| 2021-05-15T01:20:30
| 211,400,595
| 1
| 1
|
MIT
| 2022-12-08T06:45:40
| 2019-09-27T20:57:19
|
Python
|
UTF-8
|
Python
| false
| false
| 432
|
py
|
# Generated by Django 2.2.5 on 2019-12-05 06:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('premier', '0008_relevancy_and_notes'),
]
operations = [
migrations.AddField(
model_name='premiermanufacturer',
name='slug',
field=models.CharField(blank=True, null=True, max_length=20, unique=True),
),
]
|
[
"anniethiessen79@gmail.com"
] |
anniethiessen79@gmail.com
|
00027bc4b2d75ba9c63a911a45fe7b37e3fbfe27
|
90b31988c165366c76f935dc334161d997ce7100
|
/home/migrations/0007_blogpost.py
|
1ed585cedd1fe31539dd331b650754a7ade8718f
|
[] |
no_license
|
Ruturaj271120/TourGuide
|
77d0ab05aaeb07e20524ef1c765582052ce8b4a7
|
fd78591fd0fbae99b754b10109d96fd473009a8f
|
refs/heads/master
| 2023-06-15T21:10:39.950790
| 2021-07-13T16:18:48
| 2021-07-13T16:18:48
| 385,126,502
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,054
|
py
|
# Generated by Django 3.1.5 on 2021-01-22 09:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0006_orderupdate'),
]
operations = [
migrations.CreateModel(
name='Blogpost',
fields=[
('post_id', models.AutoField(primary_key=True, serialize=False)),
('tilte', models.CharField(max_length=50)),
('head0', models.CharField(default='', max_length=500)),
('chead0', models.CharField(default='', max_length=5000)),
('head1', models.CharField(default='', max_length=500)),
('chead1', models.CharField(default='', max_length=5000)),
('head2', models.CharField(default='', max_length=500)),
('chead2', models.CharField(default='', max_length=5000)),
('pub_date', models.DateField()),
('thumbnail', models.ImageField(default='', upload_to='shop/images')),
],
),
]
|
[
"dyawanpalli@gmail.com"
] |
dyawanpalli@gmail.com
|
c1c7769ff0a56f13f170b6f197ac983c46757b87
|
a992a2bcea0e40a9cc3d947466a7eaafaf337280
|
/python-language/files/walking.py
|
aac57b1257bca4c10be89ca6560685f7c9667083
|
[] |
no_license
|
rodrigoferrazazevedo/dojo-python
|
6bcfb4bf27435b5e60fa1c992840e319fe2dbbb3
|
b3aebfae8b0dae2c2b898431ed747dd9236706e3
|
refs/heads/master
| 2022-12-24T10:25:10.692833
| 2022-06-22T02:12:18
| 2022-06-22T02:12:18
| 107,460,652
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 289
|
py
|
import os
for root, dirs, files in os.walk('.'):
print(os.path.abspath(root))
if dirs:
print('Directories:')
for dir_ in dirs:
print(dir_)
if files:
print('Files:')
for filename in files:
print(filename)
print()
|
[
"rodrigo.azevedo@sintesoft.com.br"
] |
rodrigo.azevedo@sintesoft.com.br
|
588a087c3218bc431f07e9c706dca62735a9b86e
|
0f1d2be366fbfd07a3e6961e6c3878d93a460607
|
/init_mod_wayfinder.py
|
bdf11aae6f90fc23fc47d9505fea57f83e0c1ffe
|
[
"Apache-2.0"
] |
permissive
|
minrva/db-wayfinder
|
f659d0f71b9baad74bfd0071a5cdfe0cecbf4333
|
667d7e1b0ccf9cabc1755153d406ffae4762be98
|
refs/heads/master
| 2020-04-26T05:05:14.112447
| 2019-04-16T19:47:33
| 2019-04-16T19:47:33
| 173,322,414
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,848
|
py
|
# pylint: disable=invalid-name
"""Digests mod-wayfinder json data."""
import csv, json, os, uuid, dataset
# directory
DATA_DIR = 'data'
# postgres
PG_USER = 'folio_admin'
PG_PASSWORD = 'folio_admin'
PG_NETLOC = '10.0.2.15'
PG_PORT = '5432'
PG_DBNAME = 'okapi_modules'
PG_URL = ("postgresql://" + PG_USER + ":" + PG_PASSWORD +
'@' + PG_NETLOC + ':' + PG_PORT + '/' + PG_DBNAME)
# wayfinder
WF_SCHEMA = 'diku_mod_wayfinder'
WF_SHELVES_TBL = 'shelves'
SHELVES_CSV_FILE = 'shelves.csv'
SHELVES_JSON_FILE = 'shelves.json'
# holdings record
HOLDINGS_SCHEMA = 'diku_mod_inventory_storage'
HOLDINGS_TBL = 'holdings_record'
MAIN_LOCATION_ID = 'fcd64ce1-6995-48f0-840e-89ffa2288371'
INSTANCE_IDS = [
'b5b13415-145b-4e61-aaa8-aecf6a4a0571',
'ef3641e5-ead0-4409-a485-4ab0059646c5',
'0e3f5a3d-79c5-4252-96ea-6c0c0dbe4e7e',
'de1c4934-f4dc-4ab1-8548-16915e682dd2',
'b21d2059-dc52-4afa-b12c-6870f0680389'
]
CALL_NOS = [
'CT502 .E542 1998',
'F215 .W85 1951',
'E302.6.F8 V362 1945',
'UA23.15 .F45 2002',
'D102 .M38 2002'
]
def load_csv(fpath):
"""Loads a CSV file."""
d = []
with open(fpath, encoding='utf-8-sig') as fs:
csvReader = csv.DictReader(fs)
for csvRow in csvReader:
d.append(csvRow)
print("Loaded {0} objects from {1}...".format(len(d), fpath))
return d
def load_json(fpath):
"""Loads a JSON file."""
with open(fpath) as fs:
d = json.load(fs)
print("Loaded {0} objects from {1}...".format(len(d), fpath))
return d
def load_table(tbl_name, schema_name):
"""Loads a postgres table."""
rows = []
with dataset.Database(url=PG_URL, schema=schema_name) as db:
tbl = db[tbl_name]
print("Loaded {0} rows from {1}.{2}...".format(
len(tbl), schema_name, tbl_name))
for row in tbl:
rows.append(row)
db.executable.close()
db = None
return rows
def csv_to_json(cpath, jpath):
"""Transforms CSV file to JSON file."""
print("Transforming {0} to {1}...".format(cpath, jpath))
shelves_csv = load_csv(cpath)
with open(jpath, "w") as json_file:
json_file.write(json.dumps(shelves_csv, indent=4))
def populate_table(rows, tbl_name, schema_name, clear=True):
"""Creates a postgres table."""
print("Saving {0} rows to {1}.{2}...".format(
len(rows), schema_name, tbl_name))
with dataset.Database(url=PG_URL, schema=schema_name) as db:
table = db[tbl_name]
if clear:
table.delete()
table.insert_many(rows)
db.executable.close()
db = None
def create_shelf_row(data):
"""Creates a shelf row."""
new_obj = {}
new_obj['id'] = str(uuid.uuid4())
new_obj['permanentLocationId'] = MAIN_LOCATION_ID
new_obj['label'] = data['label']
new_obj['lowerBound'] = data['lowerBound']
new_obj['upperBound'] = data['upperBound']
new_obj['mapTitle'] = data['mapTitle']
new_obj['mapUri'] = data['mapUri']
new_obj['x'] = data['x']
new_obj['y'] = data['y']
return dict(jsonb=new_obj)
def create_shelf_rows(shelves):
"""Creates shelf rows from json array."""
print('Transforming data to database rows...')
rows = []
for shelf in shelves:
row = create_shelf_row(shelf)
rows.append(row)
return rows
def update_holdings_record(row_id, call_no, tbl_name, schema_name):
"""Updates a holding record call number by instance id."""
print("Updating call no. to {0} for row {1} in {2}.{3}...".format(
call_no, row_id, schema_name, tbl_name))
with dataset.Database(url=PG_URL, schema=schema_name) as db:
tbl = db[tbl_name]
row = tbl.find_one(instanceid=row_id)
if row is not None:
row['jsonb']['callNumber'] = call_no
tbl.upsert(row, ['_id'])
db.executable.close()
db = None
def update_holdings_records(instance_ids, call_nos, tbl_name, schema_name):
"""Updates a batch of holding record call numbers by instance ids."""
for instance_id, call_no in zip(instance_ids, call_nos):
update_holdings_record(instance_id, call_no, tbl_name, schema_name)
if __name__ == '__main__':
# transformed csv to json
# csv_path = os.path.join(DATA_DIR, SHELVES_CSV_FILE)
# json_path = os.path.join(DATA_DIR, SHELVES_JSON_FILE)
# csv_to_json(csv_path, json_path)
# update existing FOLIO holding records with call numbers
update_holdings_records(INSTANCE_IDS, CALL_NOS,
HOLDINGS_TBL, HOLDINGS_SCHEMA)
# load sample shelves into diku_mod_wayfinder.shelves
shelves_path = os.path.join(DATA_DIR, SHELVES_JSON_FILE)
shelves_json = load_json(shelves_path)
shelf_rows = create_shelf_rows(shelves_json)
populate_table(shelf_rows, WF_SHELVES_TBL, WF_SCHEMA)
print('Complete...')
|
[
"ryckman1@illinois.edu"
] |
ryckman1@illinois.edu
|
1d650cb249a55125346b1609aac81c061af9c1cd
|
19a7cf9c822a9d21211426fca3dc7ff856cf6087
|
/aiozmq/_test_util.py
|
a4ed41a81294fc4dd1061711b9f2f1a1593f6f47
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
pombredanne/aiozmq
|
91bd19fb212b15b7a9082668ae1c1e2405067412
|
2b482511f244f7049ee9332ae283d1d47eef0d5b
|
refs/heads/master
| 2020-12-30T22:10:18.018880
| 2014-12-28T15:25:23
| 2014-12-28T15:25:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,454
|
py
|
"""Private test support utulities"""
import contextlib
import functools
import logging
import platform
import socket
import sys
import time
import unittest
class Error(Exception):
"""Base class for regression test exceptions."""
class TestFailed(Error):
"""Test failed."""
def _requires_unix_version(sysname, min_version): # pragma: no cover
"""Decorator raising SkipTest if the OS is `sysname` and the
version is less than `min_version`.
For example, @_requires_unix_version('FreeBSD', (7, 2)) raises SkipTest if
the FreeBSD version is less than 7.2.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if platform.system() == sysname:
version_txt = platform.release().split('-', 1)[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"%s version %s or higher required, not %s"
% (sysname, min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
def requires_freebsd_version(*min_version): # pragma: no cover
"""Decorator raising SkipTest if the OS is FreeBSD and the FreeBSD
version is less than `min_version`.
For example, @requires_freebsd_version(7, 2) raises SkipTest if the FreeBSD
version is less than 7.2.
"""
return _requires_unix_version('FreeBSD', min_version)
def requires_linux_version(*min_version): # pragma: no cover
"""Decorator raising SkipTest if the OS is Linux and the Linux version is
less than `min_version`.
For example, @requires_linux_version(2, 6, 32) raises SkipTest if the Linux
version is less than 2.6.32.
"""
return _requires_unix_version('Linux', min_version)
def requires_mac_ver(*min_version): # pragma: no cover
"""Decorator raising SkipTest if the OS is Mac OS X and the OS X
version if less than min_version.
For example, @requires_mac_ver(10, 5) raises SkipTest if the OS X version
is lesser than 10.5.
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if sys.platform == 'darwin':
version_txt = platform.mac_ver()[0]
try:
version = tuple(map(int, version_txt.split('.')))
except ValueError:
pass
else:
if version < min_version:
min_version_txt = '.'.join(map(str, min_version))
raise unittest.SkipTest(
"Mac OS X %s or higher required, not %s"
% (min_version_txt, version_txt))
return func(*args, **kw)
wrapper.min_version = min_version
return wrapper
return decorator
# Don't use "localhost", since resolving it uses the DNS under recent
# Windows versions (see issue #18792).
HOST = "127.0.0.1"
HOSTv6 = "::1"
def _is_ipv6_enabled(): # pragma: no cover
"""Check whether IPv6 is enabled on this host."""
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
sock.bind((HOSTv6, 0))
return True
except OSError:
pass
finally:
if sock:
sock.close()
return False
IPV6_ENABLED = _is_ipv6_enabled()
def find_unused_port(family=socket.AF_INET,
socktype=socket.SOCK_STREAM): # pragma: no cover
"""Returns an unused port that should be suitable for binding. This is
achieved by creating a temporary socket with the same family and type as
the 'sock' parameter (default is AF_INET, SOCK_STREAM), and binding it to
the specified host address (defaults to 0.0.0.0) with the port set to 0,
eliciting an unused ephemeral port from the OS. The temporary socket is
then closed and deleted, and the ephemeral port is returned.
Either this method or bind_port() should be used for any tests where a
server socket needs to be bound to a particular port for the duration of
the test. Which one to use depends on whether the calling code is creating
a python socket, or if an unused port needs to be provided in a constructor
or passed to an external program (i.e. the -accept argument to openssl's
s_server mode). Always prefer bind_port() over find_unused_port() where
possible. Hard coded ports should *NEVER* be used. As soon as a server
socket is bound to a hard coded port, the ability to run multiple instances
of the test simultaneously on the same host is compromised, which makes the
test a ticking time bomb in a buildbot environment. On Unix buildbots, this
may simply manifest as a failed test, which can be recovered from without
intervention in most cases, but on Windows, the entire python process can
completely and utterly wedge, requiring someone to log in to the buildbot
and manually kill the affected process.
(This is easy to reproduce on Windows, unfortunately, and can be traced to
the SO_REUSEADDR socket option having different semantics on Windows versus
Unix/Linux. On Unix, you can't have two AF_INET SOCK_STREAM sockets bind,
listen and then accept connections on identical host/ports. An EADDRINUSE
OSError will be raised at some point (depending on the platform and
the order bind and listen were called on each socket).
However, on Windows, if SO_REUSEADDR is set on the sockets, no EADDRINUSE
will ever be raised when attempting to bind two identical host/ports. When
accept() is called on each socket, the second caller's process will steal
the port from the first caller, leaving them both in an awkwardly wedged
state where they'll no longer respond to any signals or graceful kills, and
must be forcibly killed via OpenProcess()/TerminateProcess().
The solution on Windows is to use the SO_EXCLUSIVEADDRUSE socket option
instead of SO_REUSEADDR, which effectively affords the same semantics as
SO_REUSEADDR on Unix. Given the propensity of Unix developers in the Open
Source world compared to Windows ones, this is a common mistake. A quick
look over OpenSSL's 0.9.8g source shows that they use SO_REUSEADDR when
openssl.exe is called with the 's_server' option, for example. See
http://bugs.python.org/issue2550 for more info. The following site also
has a very thorough description about the implications of both REUSEADDR
and EXCLUSIVEADDRUSE on Windows:
http://msdn2.microsoft.com/en-us/library/ms740621(VS.85).aspx)
XXX: although this approach is a vast improvement on previous attempts to
elicit unused ports, it rests heavily on the assumption that the ephemeral
port returned to us by the OS won't immediately be dished back out to some
other process when we close and delete our temporary socket but before our
calling code has a chance to bind the returned port. We can deal with this
issue if/when we come across it.
"""
tempsock = socket.socket(family, socktype)
port = bind_port(tempsock)
tempsock.close()
del tempsock
return port
def bind_port(sock, host=HOST): # pragma: no cover
"""Bind the socket to a free port and return the port number. Relies on
ephemeral ports in order to ensure we are using an unbound port. This is
important as many tests may be running simultaneously, especially in a
buildbot environment. This method raises an exception if the sock.family
is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
or SO_REUSEPORT set on it. Tests should *never* set these socket options
for TCP/IP sockets. The only case for setting these options is testing
multicasting via multiple UDP sockets.
Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
on Windows), it will be set on the socket. This will prevent anyone else
from bind()'ing to our host/port for the duration of the test.
"""
if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
if hasattr(socket, 'SO_REUSEADDR'):
if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
raise TestFailed("tests should never set the SO_REUSEADDR "
"socket option on TCP/IP sockets!")
if hasattr(socket, 'SO_REUSEPORT'):
try:
if (sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
== 1):
raise TestFailed("tests should never set the SO_REUSEPORT "
"socket option on TCP/IP sockets!")
except OSError:
# Python's socket module was compiled using modern headers
# thus defining SO_REUSEPORT but this process is running
# under an older kernel that does not support SO_REUSEPORT.
pass
if hasattr(socket, 'SO_EXCLUSIVEADDRUSE'):
sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
sock.bind((host, 0))
port = sock.getsockname()[1]
return port
def check_errno(errno, exc):
assert isinstance(exc, OSError), exc
assert exc.errno == errno, (exc, errno)
class TestHandler(logging.Handler):
def __init__(self, queue):
super().__init__()
self.queue = queue
def emit(self, record):
time.sleep(0)
self.queue.put_nowait(record)
@contextlib.contextmanager
def log_hook(logname, queue):
logger = logging.getLogger(logname)
handler = TestHandler(queue)
logger.addHandler(handler)
level = logger.level
logger.setLevel(logging.DEBUG)
try:
yield
finally:
logger.removeHandler(handler)
logger.level = level
class RpcMixin:
def close_service(self, service):
if service is None:
return
loop = service._loop
service.close()
loop.run_until_complete(service.wait_closed())
|
[
"andrew.svetlov@gmail.com"
] |
andrew.svetlov@gmail.com
|
988731abc2a0902f70729426b8761eec16feb508
|
d60b41462ab5dd83f3e95d740cacc2d1e5051232
|
/plugins/dbnd-test-scenarios/src/dbnd_test_scenarios/pipelines/pipeline_prepare_config.py
|
9e85364dc78366a72d43f9befa199eca38159e11
|
[
"Apache-2.0"
] |
permissive
|
hyunjay/dbnd
|
0581ca493fc2a3fa1cab74d0bb8fa620b1669f85
|
ab5a8ebf5984e73d0c7129a6898fed98a239b90b
|
refs/heads/master
| 2023-07-05T10:02:07.112720
| 2021-08-05T13:53:50
| 2021-08-05T13:53:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 732
|
py
|
import logging
from dbnd import log_metric, pipeline, task
from targets.types import PathStr
logger = logging.getLogger(__name__)
@task
def create_model(y_input="y"):
logger.info("Running %s -> operation_y", y_input)
log_metric("a", 1)
return "{} -> operation_y".format(y_input)
@task
def prepare_cfg(base_cfg, model):
# type:(PathStr,PathStr) -> object
return {"a": model}
@task
def run_cfg(cfg, coeff=2):
# type:(PathStr, int) -> str
logger.info("Running %s -> operation_z", cfg)
return "cfg({})".format(cfg)
@pipeline
def pipe_operations(base_cfg):
# type: (PathStr)->str
model = create_model()
cfg = prepare_cfg(base_cfg=base_cfg, model=model)
return run_cfg(cfg=cfg)
|
[
"viktor.danyliuk@databand.ai"
] |
viktor.danyliuk@databand.ai
|
1d431aacfd20db6b9880ccd00e7e3827b2259f48
|
a80d902906c902a8ebb15956f96743d2904bd5b0
|
/A1/Task_b.py
|
7c14964e990b59dd65f7085c1fab96541d71de0d
|
[] |
no_license
|
ziquanliu/ML_PA1
|
f0b2e8bb0809e5431800974ee471487200385dc0
|
e507c56a1b6c98ddd68435cd6477659a148319f8
|
refs/heads/master
| 2021-07-09T08:54:15.935920
| 2017-10-09T11:14:05
| 2017-10-09T11:14:05
| 106,076,521
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,830
|
py
|
import regclass as rgr
import numpy as np
import scipy.io as scio
import pickle
data=scio.loadmat('poly_data.mat')
polyx=data['polyx']#1*100
polyy=data['polyy']#100*1
sampx=data['sampx']#1*50
sampy=data['sampy']#50*1
thtrue=data['thtrue']#6*1
#-------------------------------------------Training Part---------------------------------------------------------------
dim_Fea=5 #Feature dimension
variance=5 #variance of data
Fea_Trn=rgr.PolyFea(sampx,dim_Fea)
regress=rgr.Regressor(Fea_Trn,sampy,variance)
rgl_lambda=1.8 #hyperparameter of regulized LS
lasso_t=1 #hyperparameter of LASSO
Bay_alpha=1 #hyperparameter of Bayes
theta={'LSE':regress.LSE_Reg(),'Rgl':regress.Regul_Reg(rgl_lambda),'Las':regress.LASSO_Reg(lasso_t),
'Rob':regress.Robust_Reg()}
Bayes_theta=regress.Bay_Reg(Bay_alpha)
#--------------------------------------------Testing Part---------------------------------------------------------------
Fea_Tst=rgr.PolyFea(polyx,dim_Fea)
Test_Pred={}
Train_Pred={}
for key in theta:
Test_Pred[key] = rgr.nonBayes_Pred(theta[key],Fea_Tst)
Train_Pred[key] = rgr.nonBayes_Pred(theta[key], Fea_Trn)
Bayes_Test_Pred = rgr.Bayes_Pred(Fea_Tst,Bayes_theta)
Bayes_Train_Pred = rgr.Bayes_Pred(Fea_Trn,Bayes_theta)
#---------------------------------------------Plot Part-----------------------------------------------------------------
#non Bayes
for key in theta:
rgr.nonBayes_Plt(sampx ,sampy, Train_Pred[key],polyx,polyy,Test_Pred[key],key)
#Bayes
rgr.Bayes_Ply(sampx,sampy,Bayes_Train_Pred,polyx,polyy,Bayes_Test_Pred)
#----------------------------------------Error Analysis Part------------------------------------------------------------
Pred_Error=rgr.Pred_All_Err(polyy,Test_Pred,Bayes_Test_Pred)
f1=open('Task B Predict Error.txt','wb')
pickle.dump(Pred_Error,f1)
f1.close()
|
[
"ziquanliu2-c@my.cityu.edu.hk"
] |
ziquanliu2-c@my.cityu.edu.hk
|
2ca91a62000f3a0a8a742b4823bc3a283b75e62c
|
e166d1fb52a1d8bbd3e961a85a605734b88d9eed
|
/web_app/rest_api/__init__.py
|
19a54bb33261b2874222bb2ca6d8b46df3fd2060
|
[] |
no_license
|
sampathdechu/fullstack_docker_compose
|
88a18b2a98adfcaa1639c79c157588cf12b7fde7
|
1e7156a15af5ee5a8b5d6e4164707398d21b74d2
|
refs/heads/master
| 2023-03-24T05:57:47.938688
| 2021-03-22T05:09:56
| 2021-03-22T05:09:56
| 292,256,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 260
|
py
|
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from flask_restplus import Api
from .namespace_pipeline import ns_1
api = Api(
title='SlackBot',
version='1.0',
description='AIOPs Slack Bot`',
)
api.add_namespace(ns_1)
|
[
"sampath@Sampaths-MacBook-Pro.local"
] |
sampath@Sampaths-MacBook-Pro.local
|
b7dedd97ddd99fb248f382159b97035a46f7096e
|
5b763e333caf91664a8b67154325826a67cc750f
|
/news_pipeline/tf_idf_test.py
|
cefc5b09875b5bcbb873e582cafdb32aa68c316d
|
[] |
no_license
|
Edison201314/Final-ImageSearch-
|
3ddfd28c7f2b8883c25605e07d0ebfb517717b92
|
3d2a7c9820e69a24408d06a54a22da17fb9c1714
|
refs/heads/master
| 2021-01-21T15:58:10.395027
| 2017-05-20T05:37:43
| 2017-05-20T05:37:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 374
|
py
|
from sklearn.feature_extraction.text import TfidfVectorizer
doc1 = "I like apples. I like oranges too"
doc2 = "I love apples. I hate doctors"
doc3 = "An apple a day keeps the doctor away"
doc4 = "Never compare an apple to an orang"
documents = [doc1, doc2, doc3, doc4]
tfidf = TfidfVectorizer().fit_transform(documents)
pairwise_sim = tfidf * tfidf.T
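# pairwise_sim holds dot products between the L2-normalised TF-IDF rows (i.e. cosine-style
# document similarities) as a sparse matrix; .A converts it to a dense NumPy array for printing.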
print pairwise_sim.A
|
[
"xiexin201314@gmail.com"
] |
xiexin201314@gmail.com
|
13db8a97123f7b644626f52cddadc7e7885f894f
|
193abf43f68416cc1bcb6d52ac9e0b2d98d25831
|
/src/problems/0052_permuted_multiples/v1.py
|
cc4fde68ba0c31fe2424d525ecd7ee43df02c1dd
|
[] |
no_license
|
david-sk/projecteuler
|
ee8d739daa38572fd37a6a83f0be5d3b6044a341
|
7b92872ced634299d89ce378a58618dbed6c842a
|
refs/heads/master
| 2023-04-01T06:06:02.624469
| 2021-04-03T10:54:42
| 2021-04-03T10:54:42
| 274,217,736
| 0
| 0
| null | 2021-03-14T19:10:40
| 2020-06-22T18:49:56
|
Rust
|
UTF-8
|
Python
| false
| false
| 734
|
py
|
#
# Permuted multiples, v1
# https://projecteuler.net/problem=52
#
# It can be seen that the number, 125874, and its double, 251748, contain exactly the same digits,
# but in a different order.
# Find the smallest positive integer, x, such that 2x, 3x, 4x, 5x, and 6x, contain the same digits.
#
from collections import Counter
def get_digits_counter(n):
digits = []
while n > 0:
digits.append(n % 10)
n //= 10
return Counter(digits)
def run():
n = 1
while not (
get_digits_counter(2 * n)
== get_digits_counter(3 * n)
== get_digits_counter(4 * n)
== get_digits_counter(5 * n)
== get_digits_counter(6 * n)
):
n += 1
print('Result:', n)
|
[
"david.desmeurs@slagkryssaren.com"
] |
david.desmeurs@slagkryssaren.com
|
38c5b8bd8e89453324ed6f797ab08f8e8baef797
|
bb2350513357d54a7dff3e62c394611785306e19
|
/output/generate_tables.py
|
20afca7ad7a5ff2b2f500e6e34e0634ded4c41be
|
[] |
no_license
|
trevstanhope/python-cvme
|
8cc1812cd02da71ef829991a6212707ee2c5dab6
|
0c79c08cc39dcce15bbbe5bb8f47d31eacca8a02
|
refs/heads/master
| 2020-12-31T07:43:47.733082
| 2017-05-01T15:17:26
| 2017-05-01T15:17:26
| 49,857,015
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 83,681
|
py
|
import pandas as pd
import glob
import os
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy.signal as sig
import scipy as sp
import time
import scipy.stats as stats
from pykalman import KalmanFilter
import matplotlib
import sklearn as skl
#font = {
# "family" : 'normal',
# "size" : 14
#}
matplotlib.rcParams['ytick.labelsize'] = 14
matplotlib.rcParams['xtick.labelsize'] = 14
def lin_fit(x,y):
fitfunc = lambda m, x: m * x
errfunc = lambda p,x,y: fitfunc(p,x) - y
a_init = 1.0
a, s = sp.optimize.leastsq(errfunc, np.array(a_init), args=(x,y))
p = np.poly1d([a,0])
yhat = p(x)
ybar = np.sum(y) / float(len(y))
ssreg = np.sum((yhat - ybar)**2)
sstot = np.sum((y - ybar)**2)
rsquare = ssreg / sstot
ttest, pval = sp.stats.ttest_1samp(np.abs(x - y), 0.0)
return a[0], rsquare, ttest, pval
def poly2d(df, kx='srtk', ky='se'):
x = np.array(df[kx][~np.isnan(df[ky])])
y = np.abs(np.array(df[ky][~np.isnan(df[ky])]))
coef = np.polyfit(x,y,2)
X = np.linspace(1,5)
Y = np.polyval(coef, X)
return X, Y
def calc_dist(pt1, pt2, a=0.007344, b=-2.40728224, c=246.317244):
x1,y1 = pt1
x2,y2 = pt2
d = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)
X = a*d**2 + b*d + c
return X
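# calc_dist maps the pixel distance d between two points to a physical distance through a fixed
# quadratic a*d**2 + b*d + c; the default coefficients presumably come from a prior camera
# calibration (an assumption, they are not derived in this script).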
def moving_average(interval, window_size):
window = np.ones(int(window_size))/float(window_size)
return np.convolve(interval, window, 'same')
def moving_median(interval, window_size):
return sig.medfilt(interval, kernel_size=window_size)
def rmse(predictions, targets):
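    # Note: the first return below yields mean absolute error, so the RMSE expression on the
    # following line is unreachable as written.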
return np.nanmean(abs(np.array(predictions) - np.array(targets)))
return np.sqrt(np.nanmean(((np.array(predictions) - np.array(targets)) ** 2.0)))
def p95(predictions, targets):
if len(predictions) == 0 or len(targets) == 0:
return np.NAN
else:
abs_err = np.abs(np.array(predictions[~np.isnan(targets)]) - np.array(targets[~np.isnan(targets)]))
return np.percentile(abs_err, 95)
def p95_by_group(y, x, bins, lims):
offset = (lims[1] - lims[0]) / float(bins)
groups_a = np.linspace(lims[0], lims[1] - offset, num=bins)
groups_b = np.linspace(lims[0] + offset, lims[1], num=bins)
groups = zip(groups_a, groups_b)
P95 = []
N = []
for (a,b) in groups:
N.append(np.sum(np.logical_and(y>a, y<=b)))
Y = y[np.logical_and(y>a, y<=b)]
X = x[np.logical_and(y>a, y<=b)]
v = p95(Y, X)
if np.isnan(v): print "NAN WARNING in RMSE by Group!"
P95.append(v)
return P95, (groups_a + groups_b) / 2.0
def rmse_by_group(y, x, bins, lims):
offset = (lims[1] - lims[0]) / bins
groups_a = np.linspace(lims[0], lims[1] - offset, num=bins)
groups_b = np.linspace(lims[0] + offset, lims[1], num=bins)
groups = zip(groups_a, groups_b)
RMSE = []
N = []
for (a,b) in groups:
N.append(np.sum(np.logical_and(y>a, y<=b)))
Y = y[np.logical_and(y>a, y<=b)]
X = x[np.logical_and(y>a, y<=b)]
v = rmse(Y, X)
if np.isnan(v): print "NAN WARNING in RMSE by Group!"
RMSE.append(v)
return RMSE, (groups_a + groups_b) / 2.0
def histogram_by_lims(y, x, lims, bins, minmax):
a = lims[0]
b = lims[1]
return np.histogram(y[np.logical_and(y>a, y<b)] - x[np.logical_and(y>a, y<b)], bins, minmax)
def normalize(x):
norm=np.linalg.norm(x)
if norm==0:
return x
return x/norm
# Iterate through all trials
if __name__ == '__main__':
SPEED_RANGE = [1,5]
NUM_GROUPS = 4
SUB_GROUPS = 1
SMOOTHING_WINDOW = 5
ORB_TREATMENTS = ['500','750','250']
SURF_TREATMENTS = ['2000', '1500', '1000']
SIFT_TREATMENTS = ['500', '750', '250']
SURFACES = ['asphault', 'grass', 'gravel', 'residue', 'corn', 'hay']
ALGORITHMS = ['SURFEx',
'SURFEx_N2',
'USURFEx',
'USURFEx_N2',
'ORB_HAMMING',
'ORB_HAMMING_N2',
'ORB_HAMMINGCL',
'ORB_HAMMINGCL_N2',
'SIFT',
'SIFT_N2'
]
DEZOOM = 1.01
LINE_TYPES = {
'125' : 'dotted',
'250' : 'dotted',
'500' : 'dashdot',
'750' : 'solid',
'1000' : 'dotted',
'1500' : 'dashdot',
'2000' : 'solid',
'3000' : 'solid'
}
CORR_FACTORS = {
'asphault': 1.00,
'gravel': 1.00,
'residue': 0.99,
'grass': 0.95,
'hay': 0.93,
'corn' : 0.96,
'soy' : 0.65
}
TRIAL_BLACKLIST = ['gravel-1', 'gravel-7',
'corn-1', 'corn-3', 'corn-4', 'corn-7', 'corn-8', 'corn-11',
'hay-1']
HEADERS = ['rtk','v', 't', 'm', 'p', 'n'] # Column headers (must match the .csv headers!)
TREATMENTS = [tuple(d.split('-')) for d in os.listdir(".") if os.path.isdir(d)]
TRIALS = [f.split('/')[-1].split('.')[0] for f in glob.glob("../data/*.csv")]
HATCHES = {
"USURF" : "",
"USURF_N2" : "",
"SURF" : "",
"SURF_N2" : "",
"USURFEx" : "",
"USURFEx_N2" : "",
"ORB_HAMMING" : "",
"ORB_HAMMING_N2" : "",
"ORB_HAMMING2" : "",
"ORB_HAMMING2_N2" : "",
"ORB_HAMMINGCL" : "",
"ORB_HAMMINGCL_N2" : "",
"ORB_HAMMINGEQ" : "",
"ORB_HAMMINGEQ_N2" : "",
"ORB_L2" : "",
"ORB_L2_N2" : "",
"ORB_FAST" : "",
"ORB_FAST_N2" : "",
"SIFT" : "",
"SIFT_N2" : ""
}
#COLORS = {
# "USURF" : "darkred",
# "USURF_N2" : "red",
# "SURF" : "darkred",
# "SURF_N2" : "red",
# "SURFEx" : "darkred",
# "SURFEx_N2" : "red",
# "USURFEx" : "darkorange",
# "USURFEx_N2" : "yellow",
# "ORB_HAMMING" : "green",
# "ORB_HAMMING_N2" : "lime",
# "ORB_HAMMING2" : "blue",
# "ORB_HAMMING2_N2" : "royalblue",
# "ORB_HAMMINGCL" : "blue",
# "ORB_HAMMINGCL_N2" : "royalblue",
# "ORB_HAMMINGEQ" : "blue",
# "ORB_HAMMINGEQ_N2" : "royalblue",
# "ORB_L2" : "orange",
# "ORB_L2_N2" : "darkorange",
# "ORB_FAST" : "cyan",
# "ORB_FAST_N2" : "darkcyan",
# "SIFT" : "purple",
# "SIFT_N2" : "magenta",
# "RTK" : "red"
#}
COLORS = {
"USURF" : "0.05",
"USURF_N2" : "0.50",
"SURF" : "0.05",
"SURF_N2" : "0.50",
"SURFEx" : "0.05",
"SURFEx_N2" : "0.50",
"USURFEx" : "0.05",
"USURFEx_N2" : "0.50",
"ORB_HAMMING" : "0.05",
"ORB_HAMMING_N2" : "0.50",
"ORB_HAMMING2" : "0.05",
"ORB_HAMMING2_N2" : "0.50",
"ORB_HAMMINGCL" : "0.05",
"ORB_HAMMINGCL_N2" : "0.50",
"ORB_HAMMINGEQ" : "0.05",
"ORB_HAMMINGEQ_N2" : "0.50",
"ORB_L2" : "0.05",
"ORB_L2_N2" : "0.50",
"ORB_FAST" : "0.05",
"ORB_FAST_N2" : "0.50",
"SIFT" : "0.05",
"SIFT_N2" : "0.50",
"RTK" : "0.05"
}
MARKER_SIZE = 60
MARKERS = {
"USURF" : ">",
"USURF_N2" : "<",
"SURF" : ">",
"SURF_N2" : "<",
"SURFEx" : ">",
"SURFEx_N2" : "<",
"USURFEx" : "^",
"USURFEx_N2" : "v",
"ORB_HAMMING" : "s",
"ORB_HAMMING_N2" : "D",
"ORB_HAMMING2" : "d",
"ORB_HAMMING2_N2" : "*",
"ORB_HAMMINGCL" : "o",
"ORB_HAMMINGCL_N2" : "x",
"ORB_HAMMINGEQ" : "o",
"ORB_HAMMINGEQ_N2" : "x",
"ORB_L2" : "o",
"ORB_L2_N2" : "x",
"ORB_FAST" : "o",
"ORB_FAST_N2" : "x",
"SIFT" : "p",
"SIFT_N2" : "H",
"RTK" : "+"
}
SURFACE_LABELS = {
'asphault' : 'Asphalt',
'gravel' : 'Gravel',
'residue' : 'Seedlings',
'grass' : 'Turf Grass',
'corn' : 'Corn Residue',
'hay' : 'Pasture'
}
ALGORITHM_LABELS = {
"USURF" : "SURF (cross-check)",
"USURF_N2" : "SURF (ratio-test)",
"SURF" : "SURF (cross-check)",
"SURF_N2" : "SURF (ratio-test)",
"SURFEx" : "SURF (cross-check)",
"SURFEx_N2" : "SURF (ratio-test)",
"USURFEx" : "U-SURF (cross-check)",
"USURFEx_N2" : "U-SURF (ratio-test)",
"ORB_HAMMING" : "ORB (cross-check)",
"ORB_HAMMING_N2" : "ORB (ratio-test)",
"ORB_HAMMING2" : "CLORB (cross-check)",
"ORB_HAMMING2_N2" : "CLORB (ratio-test)",
"ORB_HAMMINGCL" : "CLORB (cross-check)",
"ORB_HAMMINGCL_N2" : "CLORB (ratio-test)",
"ORB_HAMMINGEQ" : "EORB (cross-check)",
"ORB_HAMMINGEQ_N2" : "EORB (ratio-test)",
"ORB_L2" : "orange",
"ORB_L2_N2" : "darkorange",
"ORB_FAST" : "cyan",
"ORB_FAST_N2" : "darkcyan",
"SIFT" : "SIFT (cross-check)",
"SIFT_N2" : "SIFT (ratio-test)"
}
LEGEND = [mpatches.Patch(color=COLORS[alg], label=ALGORITHM_LABELS[alg]) for alg in ALGORITHMS]
# Make dictionaries to load csv-files into DataFrames by algorithm
# d_usurfex = {
# 1000 : {
# asphault-1 : <df>
# ...
# }
# }
d_usurfex = { thresh:{} for alg,thresh in TREATMENTS}
d_usurfex_n2 = { thresh:{} for alg,thresh in TREATMENTS}
d_usurf = { thresh:{} for alg,thresh in TREATMENTS}
d_usurf_n2 = { thresh:{} for alg,thresh in TREATMENTS}
d_orb_hamming = { thresh:{} for alg,thresh in TREATMENTS}
d_orb_hamming_n2 = { thresh:{} for alg,thresh in TREATMENTS}
d_orb_hamming2 = { thresh:{} for alg,thresh in TREATMENTS}
d_orb_hamming2_n2 = { thresh:{} for alg,thresh in TREATMENTS}
d_orb_fast = { thresh:{} for alg,thresh in TREATMENTS}
d_orb_fast_n2 = { thresh:{} for alg,thresh in TREATMENTS}
d_sift = { thresh:{} for alg,thresh in TREATMENTS}
d_sift_n2 = { thresh:{} for alg,thresh in TREATMENTS}
d_orb_l2 = { thresh:{} for alg,thresh in TREATMENTS}
# Make dictionaries to sort DataFrames by surface
# d_asphault = {
# SURF : [<df>, ... ],
# ...
# }
d_asphault = {alg: [] for alg in ALGORITHMS}
d_gravel = {alg: [] for alg in ALGORITHMS}
d_grass = {alg: [] for alg in ALGORITHMS}
d_residue = {alg: [] for alg in ALGORITHMS}
d_corn = {alg: [] for alg in ALGORITHMS}
d_hay = {alg: [] for alg in ALGORITHMS}
# Kalman Filter
kf = KalmanFilter(transition_matrices=[[1,1],[0,1]], transition_covariance=0.01 * np.eye(2))
print("===============================================================")
output = open("summary.csv", "w")
output.write("alg,thresh,surf,t_num,hz,pts,rmse,rmse_raw,rmse_1,rmse_2,rmse_3,rmse_4,p95,p95_raw,p95_1,p95_2,p95_3,p95_4,nans,slope,r_value,p_value,std_err\n")
for alg,thresh in TREATMENTS:
for f in glob.glob(os.path.join(alg + '-' + thresh,'*.csv')):
trial = f.split('/')[-1].split('.')[0]
surface = trial.split('-')[0]
trial_num = trial.split('-')[1]
if (surface in SURFACES) and (alg in ALGORITHMS) and not (trial in TRIAL_BLACKLIST):
print alg, thresh, surface, trial_num
df = pd.DataFrame.from_csv(f)
rtk = df['rtk'] / 3.6
hz = df['hz']
v = df['v'] * DEZOOM / 3.6
nans = np.count_nonzero(np.isnan(v))
cv = v * CORR_FACTORS[surface]
mask = np.isnan(cv)
#cv[mask] = 0 # make nans zero
cv[mask] = np.interp(np.flatnonzero(mask), np.flatnonzero(~mask), cv[~mask]) # linterp nans
shz = sig.medfilt(hz, kernel_size=SMOOTHING_WINDOW)
srtk = sig.savgol_filter(rtk, SMOOTHING_WINDOW, 1)
sv = sig.medfilt(cv, kernel_size=SMOOTHING_WINDOW)
se = sv - srtk
d = 100 * ((sig.medfilt(cv, kernel_size=SMOOTHING_WINDOW) / srtk) - 1)
dv = np.gradient(sv)
drtk = np.gradient(srtk) * 25.0
h, b = histogram_by_lims(v[~np.isnan(v)], rtk[~np.isnan(v)], SPEED_RANGE, 100, [-1,1])
sh, sb = histogram_by_lims(sv, srtk, SPEED_RANGE, 100, [-1,1])
RMSE_g, groups = rmse_by_group(sv, srtk, NUM_GROUPS, SPEED_RANGE)
RMSE = rmse(sv, srtk)
RMSE_raw = rmse(v, rtk)
hz_mean = np.mean(hz)
points = len(hz)
P95_g, groups = p95_by_group(sv, srtk, NUM_GROUPS, SPEED_RANGE)
P95 = p95(sv, srtk)
P95_raw = p95(v[~np.isnan(v)], rtk[~np.isnan(v)])
#slope, intercept, r_value, p_value, std_err = stats.linregress(srtk,sv)
slope, r_value, t_test, p_value = lin_fit(srtk,sv)
df = df.join(pd.DataFrame({'d':d}))
df = df.join(pd.DataFrame({'h':h}))
df = df.join(pd.DataFrame({'b':b[:-1]}))
df = df.join(pd.DataFrame({'sh':sh}))
df = df.join(pd.DataFrame({'sb':sb[:-1]}))
df = df.join(pd.DataFrame({'cv':cv}))
df = df.join(pd.DataFrame({'dv':dv}))
df = df.join(pd.DataFrame({'drtk':drtk}))
df = df.join(pd.DataFrame({'shz':shz}))
df = df.join(pd.DataFrame({'sv':sv}))
df = df.join(pd.DataFrame({'srtk':srtk}))
df = df.join(pd.DataFrame({'se':se}))
output.write(','.join([str(i) for i in [alg,thresh,surface,trial_num,hz_mean,points,
RMSE, RMSE_raw,','.join([str(f) for f in RMSE_g]),
P95, P95_raw, ','.join([str(f) for f in P95_g]),
nans, slope, r_value, p_value, t_test]] + ['\n']))
# Sort by algorithm
if alg == 'USURFEx':
d_usurfex[thresh].update({trial : df})
elif alg == 'USURFEx_N2':
d_usurfex_n2[thresh].update({trial : df})
elif alg == 'SURFEx':
d_usurf[thresh].update({trial : df})
elif alg == 'SURFEx_N2':
d_usurf_n2[thresh].update({trial : df})
elif alg == 'ORB_HAMMING':
d_orb_hamming[thresh].update({trial : df})
elif alg == 'ORB_HAMMING_N2':
d_orb_hamming_n2[thresh].update({trial : df})
elif alg == 'ORB_HAMMINGCL':
d_orb_hamming2[thresh].update({trial : df})
elif alg == 'ORB_HAMMINGCL_N2':
d_orb_hamming2_n2[thresh].update({trial : df})
elif alg == 'SIFT':
d_sift[thresh].update({trial : df})
elif alg == 'SIFT_N2':
d_sift_n2[thresh].update({trial : df})
## elif alg == 'ORB_FAST':
## d_orb_fast[thresh].update({trial : df})
## elif alg == 'ORB_FAST_N2':
## d_orb_fast_n2[thresh].update({trial : df})
## elif alg == 'ORB_L2':
## d_orb_l2[thresh].update({trial : df})
else:
raise Exception("Bad algorithm: %s-%s" % (alg, thresh))
# Sort by surface
if surface == 'corn':
d_corn[alg].append(df)
elif surface == 'hay':
d_hay[alg].append(df)
elif surface == 'grass':
d_grass[alg].append(df)
elif surface == 'residue':
d_residue[alg].append(df)
elif surface == 'asphault':
d_asphault[alg].append(df)
elif surface == 'gravel':
d_gravel[alg].append(df)
else:
raise Exception("Bad surface: %s" % surface)
## Figure #1
print("===============================================================")
try:
# Good Example
fig = plt.figure()
fig.patch.set_facecolor('white')
fig.add_subplot(1,3,1)
trial = 'asphault-3'
key = 'sv'
plt.plot(d_orb_hamming2[ORB_TREATMENTS[0]][trial][key], c=COLORS['ORB_HAMMING2'])
plt.plot(d_usurf[SURF_TREATMENTS[0]][trial]['srtk'], c=COLORS['RTK'], linestyle='dotted', linewidth=4)
plt.ylim([1,5.5])
plt.yticks([1,2,3,4,5],fontsize=14)
plt.ylabel("Travel Speed (m/s)", fontsize=14)
plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
plt.title("Asphalt, Trial #3")
# Bad Example
fig.add_subplot(1,3,2)
trial = 'corn-5'
key = 'sv'
line_cv, = plt.plot(d_orb_hamming2[ORB_TREATMENTS[0]][trial][key], label="CLORB", c=COLORS['ORB_HAMMING2'])
line_rtk, = plt.plot(d_usurf[SURF_TREATMENTS[0]][trial]['srtk'], label="RTK GNSS", c=COLORS['RTK'], linestyle='dotted', linewidth=4)
plt.ylim([1,5.5])
plt.yticks([1,2,3,4,5],fontsize=14)
plt.title("Corn Residue, Trial #2")
plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
# Legend
fig.add_subplot(1,3,3)
plt.axis('off')
plt.legend(handles=[line_rtk, line_cv])
fig.show()
except Exception as e:
print str(e)
    ## Figure: Linear Regression (By Surface)
print("===============================================================")
try:
fig = plt.figure()
fig.patch.set_facecolor('white')
output = open('linregress.csv', 'w')
output.write('algorithm,surface,slope,r_value,p_value,t_test,p95\n')
# Gravel Subplot
fig.add_subplot(3,3,1)
surface='gravel'
for alg, trials in d_gravel.iteritems(): # Gravel
if len(trials) == 0: print "WARNING: %s is empty!" % alg
else:
composite = pd.concat(trials)
x = composite['srtk'][~np.isnan(composite['sv'])]
y = composite['sv'][~np.isnan(composite['sv'])]
slope, r_value, t_test, p_value = lin_fit(x,y)
P95 = p95(composite['sv'], composite['srtk'])
newline = "%s,%s,%f,%f,%f,%f,%f\n" % (alg,surface,slope,r_value, p_value,t_test,P95)
output.write(newline)
i = np.arange(1,6)
                Y = np.polyval([slope, 0.0], i)  # regression line through the origin: y = slope * x
plt.plot(i, Y, c=COLORS[alg])
plt.scatter(x, y, c=COLORS[alg], s=1, alpha=0.1, edgecolors='none')
plt.axis([1, 5, 1, 5])
plt.title(SURFACE_LABELS[surface])
# Asphault subplot
fig.add_subplot(3,3,2)
surface='asphault'
for alg, trials in d_asphault.iteritems(): # Asphault
if len(trials) == 0: print "WARNING: %s is empty!" % alg
else:
composite = pd.concat(trials)
x = composite['srtk'][~np.isnan(composite['sv'])]
y = composite['sv'][~np.isnan(composite['sv'])]
slope, r_value, t_test, p_value = lin_fit(x,y)
P95 = p95(composite['sv'], composite['srtk'])
newline = "%s,%s,%f,%f,%f,%f,%f\n" % (alg,surface,slope,r_value, p_value,t_test,P95)
output.write(newline)
i = np.arange(1,6)
                Y = np.polyval([slope, 0.0], i)
plt.plot(i, Y, c=COLORS[alg])
plt.scatter(x, y, c=COLORS[alg], s=1, alpha=0.1, edgecolors='none')
plt.axis([1, 5, 1, 5])
plt.title(SURFACE_LABELS[surface])
# Grass Subplot
fig.add_subplot(3,3,4)
surface='grass'
for alg, trials in d_grass.iteritems(): # Grass
if len(trials) == 0: print "WARNING: %s is empty!" % alg
else:
composite = pd.concat(trials)
x = composite['srtk'][~np.isnan(composite['sv'])]
y = composite['sv'][~np.isnan(composite['sv'])]
slope, r_value, t_test, p_value = lin_fit(x,y)
P95 = p95(composite['sv'], composite['srtk'])
newline = "%s,%s,%f,%f,%f,%f,%f\n" % (alg,surface,slope,r_value, p_value,t_test,P95)
output.write(newline)
i = np.arange(1,6)
                Y = np.polyval([slope, 0.0], i)
plt.plot(i, Y, c=COLORS[alg])
plt.scatter(x, y, c=COLORS[alg], s=1, alpha=0.1, edgecolors='none')
plt.axis([1, 5, 1, 5])
plt.title(SURFACE_LABELS[surface])
# Residue Subplot
surface='residue'
fig.add_subplot(3,3,5)
for alg, trials in d_residue.iteritems(): # Residue
if len(trials) == 0: print "WARNING: %s is empty!" % alg
else:
composite = pd.concat(trials)
x = composite['srtk'][~np.isnan(composite['sv'])]
y = composite['sv'][~np.isnan(composite['sv'])]
slope, r_value, t_test, p_value = lin_fit(x,y)
P95 = p95(composite['sv'], composite['srtk'])
newline = "%s,%s,%f,%f,%f,%f,%f\n" % (alg,surface,slope,r_value, p_value,t_test,P95)
output.write(newline)
i = np.arange(1,6)
                Y = np.polyval([slope, 0.0], i)
plt.plot(i, Y, c=COLORS[alg])
plt.scatter(x, y, c=COLORS[alg], s=1, alpha=0.1, edgecolors='none')
plt.axis([1, 5, 1, 5])
plt.title(SURFACE_LABELS[surface])
# Corn Subplot
surface='corn'
fig.add_subplot(3,3,7)
for alg, trials in d_corn.iteritems(): # Corn
if len(trials) == 0: print "WARNING: %s is empty!" % alg
else:
composite = pd.concat(trials)
x = composite['srtk'][~np.isnan(composite['sv'])]
y = composite['sv'][~np.isnan(composite['sv'])]
slope, r_value, t_test, p_value = lin_fit(x,y)
P95 = p95(composite['sv'], composite['srtk'])
newline = "%s,%s,%f,%f,%f,%f,%f\n" % (alg,surface,slope,r_value, p_value,t_test,P95)
output.write(newline)
i = np.arange(1,6)
                Y = np.polyval([slope, 0.0], i)
plt.plot(i, Y, c=COLORS[alg])
plt.scatter(x, y, c=COLORS[alg], s=1, alpha=0.1, edgecolors='none')
plt.axis([1, 5, 1, 5])
plt.title(SURFACE_LABELS[surface])
# Hay Subplot
surface='hay'
ax = fig.add_subplot(3,3,8)
for alg, trials in d_hay.iteritems(): # hay
if len(trials) == 0: print "WARNING: %s is empty!" % alg
else:
composite = pd.concat(trials)
x = composite['srtk'][~np.isnan(composite['sv'])]
y = composite['sv'][~np.isnan(composite['sv'])]
slope, r_value, t_test, p_value = lin_fit(x,y)
P95 = p95(composite['sv'], composite['srtk'])
newline = "%s,%s,%f,%f,%f,%f,%f\n" % (alg,surface,slope,r_value, p_value,t_test,P95)
output.write(newline)
i = np.arange(1,6)
                Y = np.polyval([slope, 0.0], i)
plt.plot(i, Y, c=COLORS[alg])
plt.scatter(x, y, c=COLORS[alg], s=1, alpha=0.1, edgecolors='none')
plt.axis([1, 5, 1, 5])
plt.title(SURFACE_LABELS[surface])
# Legend
fig.add_subplot(3,3,6)
plt.axis('off')
plt.legend(handles=LEGEND)
fig.show()
except Exception as e:
print str(e)
## Figure: QQ-Plot (By Surface)
print("===============================================================")
try:
fig = plt.figure()
fig.patch.set_facecolor('white')
# Gravel Subplot
fig.add_subplot(3,2,1)
surface='gravel'
for alg, trials in d_gravel.iteritems(): # Gravel
composite = pd.concat(trials)
nan = np.isnan(composite['sv'])
x = composite['sv'][~nan]
y = composite['srtk'][~nan]
(osm, osr), (slope, intercept, r) = stats.probplot(x - y, dist="norm")
plt.scatter(osm, osr, s=1, edgecolors='none', c=COLORS[alg])
plt.axis([-4,4,-0.3,0.3])
plt.xticks([-4,-3,-2,-1,0,1,2,3,4], fontsize=14)
plt.yticks([-0.2,0.0,0.2], fontsize=14)
plt.title(SURFACE_LABELS[surface])
except:
pass
try:
# Asphault subplot
fig.add_subplot(3,2,2)
surface='asphault'
for alg, trials in d_asphault.iteritems(): # Asphault
composite = pd.concat(trials)
nan = np.isnan(composite['sv'])
x = composite['sv'][~nan]
y = composite['srtk'][~nan]
(osm, osr), (slope, intercept, r) = stats.probplot(x - y, dist="norm")
plt.scatter(osm, osr, s=1, edgecolors='none', c=COLORS[alg])
plt.axis([-4,4,-0.3,0.3])
plt.xticks([-4,-3,-2,-1,0,1,2,3,4], fontsize=14)
plt.yticks([-0.2,0.0,0.2], fontsize=14)
plt.title(SURFACE_LABELS[surface])
except:
pass
try:
# Grass Subplot
fig.add_subplot(3,2,3)
surface='grass'
for alg, trials in d_grass.iteritems(): # Grass
composite = pd.concat(trials)
nan = np.isnan(composite['sv'])
x = composite['sv'][~nan]
y = composite['srtk'][~nan]
(osm, osr), (slope, intercept, r) = stats.probplot(x - y, dist="norm")
plt.scatter(osm, osr, s=1, edgecolors='none', c=COLORS[alg])
plt.axis([-4,4,-0.3,0.3])
plt.xticks([-4,-3,-2,-1,0,1,2,3,4], fontsize=14)
plt.yticks([-0.2,0.0,0.2], fontsize=14)
plt.title(SURFACE_LABELS[surface])
plt.ylabel('Ordered Median Error (m/s)', fontsize=14)
except:
pass
try:
# Residue Subplot
surface='residue'
fig.add_subplot(3,2,4)
for alg, trials in d_residue.iteritems(): # Residue
composite = pd.concat(trials)
nan = np.isnan(composite['sv'])
x = composite['sv'][~nan]
y = composite['srtk'][~nan]
(osm, osr), (slope, intercept, r) = stats.probplot(x - y, dist="norm")
plt.scatter(osm, osr, s=1, edgecolors='none', c=COLORS[alg])
plt.axis([-4,4,-0.3,0.3])
plt.xticks([-4,-3,-2,-1,0,1,2,3,4], fontsize=14)
plt.yticks([-0.2,0.0,0.2], fontsize=14)
plt.title(SURFACE_LABELS[surface])
except:
pass
try:
# Corn Subplot
surface='corn'
fig.add_subplot(3,2,5)
for alg, trials in d_corn.iteritems(): # Corn
composite = pd.concat(trials)
nan = np.isnan(composite['sv'])
x = composite['sv'][~nan]
y = composite['srtk'][~nan]
(osm, osr), (slope, intercept, r) = stats.probplot(x - y, dist="norm")
plt.scatter(osm, osr, s=1, edgecolors='none', c=COLORS[alg])
plt.axis([-4,4,-0.3,0.3])
plt.xticks([-4,-3,-2,-1,0,1,2,3,4], fontsize=14)
plt.yticks([-0.2,0.0,0.2], fontsize=14)
plt.title(SURFACE_LABELS[surface])
plt.xlabel('Quantiles', fontsize=14)
except:
pass
try:
# Hay Subplot
surface='hay'
fig.add_subplot(3,2,6)
for alg, trials in d_hay.iteritems(): # hay
composite = pd.concat(trials)
nan = np.isnan(composite['sv'])
x = composite['sv'][~nan]
y = composite['srtk'][~nan]
(osm, osr), (slope, intercept, r) = stats.probplot(x - y, dist="norm")
plt.scatter(osm, osr, s=1, edgecolors='none', c=COLORS[alg])
plt.axis([-4,4,-0.3,0.3])
plt.xticks([-4,-3,-2,-1,0,1,2,3,4], fontsize=14)
plt.yticks([-0.2,0.0,0.2], fontsize=14)
plt.title(SURFACE_LABELS[surface])
plt.xlabel('Quantiles', fontsize=14)
except:
pass
try:
# Legend
#fig.add_subplot(3,3,6)
#plt.axis('off')
#plt.legend(handles=LEGEND)
fig.show()
except Exception as e:
print str(e)
## Figure 3. Normalized Histogram (By Surface)
print("===============================================================")
fig = plt.figure()
fig.patch.set_facecolor('white')
algorithm = 'ORB_HAMMINGCL'
# Gravel
surface = 'gravel'
fig.add_subplot(3,2,1)
for alg, trials in d_gravel.iteritems():
if len(trials) == 0: print "WARNING: %s is empty!" % alg
elif alg == algorithm:
# Corrected
h = [df['sh'][~np.isnan(df['sh'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['sb'][~np.isnan(df['sb'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg])
# Raw
h = [df['h'][~np.isnan(df['h'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['b'][~np.isnan(df['b'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg], linestyle='dashed')
plt.plot(np.zeros(2), np.linspace(0,1,2), c='black')
plt.axis([-1, 1, 0, 1])
plt.xticks([-0.5, 0.0, 0.5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title(SURFACE_LABELS[surface])
# Asphault
surface = 'asphault'
fig.add_subplot(3,2,2)
for alg, trials in d_asphault.iteritems():
if len(trials) == 0: print "WARNING: %s is empty!" % alg
elif alg == algorithm:
h = [df['sh'][~np.isnan(df['sh'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['sb'][~np.isnan(df['sb'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg])
# Raw
h = [df['h'][~np.isnan(df['h'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['b'][~np.isnan(df['b'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg], linestyle='dashed')
plt.plot(np.zeros(2), np.linspace(0,1,2), c='black')
plt.axis([-1, 1, 0, 1])
plt.xticks([-0.5, 0.0, 0.5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title(SURFACE_LABELS[surface])
# Grass
surface = 'grass'
fig.add_subplot(3,2,3)
for alg, trials in d_grass.iteritems():
if len(trials) == 0: print "WARNING: %s is empty!" % alg
elif alg == algorithm:
h = [df['sh'][~np.isnan(df['sh'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['sb'][~np.isnan(df['sb'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg])
# Raw
h = [df['h'][~np.isnan(df['h'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['b'][~np.isnan(df['b'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg], linestyle='dashed')
plt.plot(np.zeros(2), np.linspace(0,1,2), c='black')
plt.xticks([-0.5, 0.0, 0.5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.axis([-1, 1, 0, 1])
plt.title(SURFACE_LABELS[surface])
plt.ylabel("Normalized Frequency", fontsize=14)
# Residue
surface = 'residue'
fig.add_subplot(3,2,4)
for alg, trials in d_residue.iteritems():
if len(trials) == 0: print "WARNING: %s is empty!" % alg
elif alg == algorithm:
h = [df['sh'][~np.isnan(df['sh'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['sb'][~np.isnan(df['sb'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg])
# Raw
h = [df['h'][~np.isnan(df['h'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['b'][~np.isnan(df['b'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg], linestyle='dashed')
plt.plot(np.zeros(2), np.linspace(0,1,2), c='black')
plt.axis([-1, 1, 0, 1])
plt.xticks([-0.5, 0.0, 0.5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title(SURFACE_LABELS[surface])
# Corn
surface = 'corn'
fig.add_subplot(3,2,5)
for alg, trials in d_corn.iteritems():
if len(trials) == 0: print "WARNING: %s is empty!" % alg
elif alg == algorithm:
h = [df['sh'][~np.isnan(df['sh'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['sb'][~np.isnan(df['sb'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg])
# Raw
h = [df['h'][~np.isnan(df['h'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['b'][~np.isnan(df['b'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg], linestyle='dashed')
plt.plot(np.zeros(2), np.linspace(0,1,2), c='black')
plt.axis([-1, 1, 0, 1])
plt.xticks([-0.5, 0.0, 0.5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title(SURFACE_LABELS[surface])
plt.xlabel("Error (m/s)", fontsize=14)
# Hay
surface = 'hay'
fig.add_subplot(3,2,6)
for alg, trials in d_hay.iteritems():
if len(trials) == 0: print "WARNING: %s is empty!" % alg
elif alg == algorithm:
h = [df['sh'][~np.isnan(df['sh'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['sb'][~np.isnan(df['sb'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg])
# Raw
h = [df['h'][~np.isnan(df['h'])] for df in trials]
h_sum = h[0]
for h_i in h[1:]:
h_sum = np.add(h_sum, h_i)
b = df['b'][~np.isnan(df['b'])]
plt.plot(b, normalize(h_sum), c=COLORS[alg], linestyle='dashed')
plt.plot(np.zeros(2), np.linspace(0,1,2), c='black')
plt.axis([-1, 1, 0, 1])
plt.xticks([-0.5, 0.0, 0.5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.xlabel("Error (m/s)", fontsize=14)
plt.title(SURFACE_LABELS[surface])
fig.show()
    ## Figure (by Feature-Detector): Scatter of RTK vs Repeatability
print("===============================================================")
fig = plt.figure()
fig.patch.set_facecolor('white')
# SURF Variants
t = SURF_TREATMENTS[0]
fig.add_subplot(5,2,1)
for trial, df in d_usurf[t].iteritems():
plt.scatter(df['srtk'], df['p'] / df['m'], c=COLORS['USURF'], s=4, edgecolors='none')
plt.axis([1, 5, 0, 1])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title('SURF (cross-check)')
fig.add_subplot(5,2,2)
for trial, df in d_usurf_n2[t].iteritems():
plt.scatter(df['srtk'], df['p'] / df['m'], c=COLORS['USURF_N2'], s=4, edgecolors='none')
plt.axis([1, 5, 0, 1])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title('SURF (ratio-test)')
fig.add_subplot(5,2,3)
for trial, df in d_usurfex[t].iteritems():
plt.scatter(df['srtk'], df['p'] / df['m'], c=COLORS['USURFEx'], s=4, edgecolors='none')
plt.axis([1, 5, 0, 1])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title('U-SURF (cross-check)')
fig.add_subplot(5,2,4)
for trial, df in d_usurfex_n2[t].iteritems():
plt.scatter(df['srtk'], df['p'] / df['m'], c=COLORS['USURFEx_N2'], s=4, edgecolors='none')
plt.axis([1, 5, 0, 1])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title('U-SURF (ratio-test)')
# ORB Variants
t = ORB_TREATMENTS[0]
fig.add_subplot(5,2,5)
for trial, df in d_orb_hamming[t].iteritems():
        plt.scatter(df['srtk'], df['p'] / df['m'], c=COLORS['ORB_HAMMING'], s=4, edgecolors='none')  # srtk (m/s) to match the SURF panels
plt.axis([1, 5, 0, 1])
plt.ylabel('Inlier-Outlier Ratio', fontsize=14)
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title('ORB (cross-check)')
fig.add_subplot(5,2,6)
for trial, df in d_orb_hamming_n2[t].iteritems():
        plt.scatter(df['srtk'], df['p'] / df['m'], c=COLORS['ORB_HAMMING_N2'], s=4, edgecolors='none')
plt.axis([1, 5, 0, 1])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title('ORB (ratio-test)')
fig.add_subplot(5,2,7)
for trial, df in d_orb_hamming2[t].iteritems():
        plt.scatter(df['srtk'], df['p'] / df['m'], c=COLORS['ORB_HAMMING2'], s=4, edgecolors='none')
plt.axis([1, 5, 0, 1])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title('CLORB (cross-check)')
fig.add_subplot(5,2,8)
for trial, df in d_orb_hamming2_n2[t].iteritems():
        plt.scatter(df['srtk'], df['p'] / df['m'], c=COLORS['ORB_HAMMING2_N2'], s=4, edgecolors='none')
plt.axis([1, 5, 0, 1])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title('CLORB (ratio-test)')
t = SIFT_TREATMENTS[0]
fig.add_subplot(5,2,9)
for trial, df in d_sift[t].iteritems():
        plt.scatter(df['srtk'], df['p'] / df['m'], c=COLORS['SIFT'], s=4, edgecolors='none')
plt.axis([1, 5, 0, 1])
plt.xlabel('True Speed (m/s)', fontsize=14)
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title('SIFT (cross-check)')
fig.add_subplot(5,2,10)
for trial, df in d_sift_n2[t].iteritems():
        plt.scatter(df['srtk'], df['p'] / df['m'], c=COLORS['SIFT_N2'], s=4, edgecolors='none')
plt.axis([1, 5, 0, 1])
plt.xlabel('True Speed (m/s)', fontsize=14)
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.5, 1.0], fontsize=14)
plt.title('SIFT (ratio-test)')
fig.show()
# Figure: SURF Variants
print("===============================================================")
fig = plt.figure()
fig.patch.set_facecolor('white')
handles = []
for t in SURF_TREATMENTS:
fig.add_subplot(2,2,1)
composite = pd.concat([df for trial, df in d_usurf[t].iteritems()])
X,Y = poly2d(composite, kx='srtk', ky='se')
line_surf, = plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.4])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4], fontsize=14)
plt.title('SURF (cross-check)')
plt.ylabel('2nd-Order Best-Fit Error (m/s)', fontsize=14)
handles.append(line_surf)
plt.legend(handles=handles, loc=2)
handles = []
for t in SURF_TREATMENTS:
fig.add_subplot(2,2,2)
composite = pd.concat([df for trial, df in d_usurf_n2[t].iteritems()])
X,Y = poly2d(composite, kx='srtk', ky='se')
line_surf_n2, = plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.4])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4], fontsize=14)
plt.title('SURF (ratio-test)')
handles.append(line_surf_n2)
plt.legend(handles=handles, loc=2)
handles = []
for t in SURF_TREATMENTS:
fig.add_subplot(2,2,3)
composite = pd.concat([df for trial, df in d_usurfex[t].iteritems()])
X,Y = poly2d(composite, kx='srtk', ky='se')
line_surf2, = plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.4])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4], fontsize=14)
plt.title('U-SURF (cross-check)')
plt.ylabel('2nd-Order Best-Fit Error (m/s)', fontsize=14)
plt.xlabel('RTK-DGPS Speed (m/s)', fontsize=14)
handles.append(line_surf2)
plt.legend(handles=handles, loc=2)
handles = []
for t in SURF_TREATMENTS:
fig.add_subplot(2,2,4)
composite = pd.concat([df for trial, df in d_usurfex_n2[t].iteritems()])
X,Y = poly2d(composite, kx='srtk', ky='se')
line_surf2_n2, = plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.4])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4], fontsize=14)
plt.title('U-SURF (ratio-test)')
plt.xlabel('RTK-DGPS Speed (m/s)', fontsize=14)
handles.append(line_surf2_n2)
plt.legend(handles=handles, loc=2)
fig.show()
# ORB Variants
print("===============================================================")
fig = plt.figure()
fig.patch.set_facecolor('white')
handles = []
fig.add_subplot(2,2,1)
for t in ORB_TREATMENTS:
composite = pd.concat([df for trial, df in d_orb_hamming[t].iteritems()])
X,Y = poly2d(composite, kx='srtk', ky='se')
line_orb, = plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.ylabel('Error (m/s)', fontsize=14)
plt.axis([1, 5, 0, 0.4])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4], fontsize=14)
plt.title('ORB (cross-check)')
plt.ylabel('2nd-Order Best-Fit Error (m/s)', fontsize=14)
handles.append(line_orb)
plt.legend(handles=handles, loc=2)
#
handles = []
fig.add_subplot(2,2,2)
for t in ORB_TREATMENTS:
composite = pd.concat([df for trial, df in d_orb_hamming_n2[t].iteritems()])
X,Y = poly2d(composite, kx='srtk', ky='se')
line_orb_n2, = plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.4])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4], fontsize=14)
plt.title('ORB (ratio-test)')
handles.append(line_orb_n2)
plt.legend(handles=handles, loc=2)
#
handles = []
fig.add_subplot(2,2,3)
for t in ORB_TREATMENTS:
composite = pd.concat([df for trial, df in d_orb_hamming2[t].iteritems()])
X,Y = poly2d(composite, kx='srtk', ky='se')
line_orb2, = plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.4])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4], fontsize=14)
plt.title('CLORB (cross-check)')
plt.xlabel('RTK-DGPS Speed (m/s)', fontsize=14)
plt.ylabel('2nd-Order Best-Fit Error (m/s)', fontsize=14)
handles.append(line_orb2)
plt.legend(handles=handles, loc=2)
#
handles = []
fig.add_subplot(2,2,4)
for t in ORB_TREATMENTS:
composite = pd.concat([df for trial, df in d_orb_hamming2_n2[t].iteritems()])
X,Y = poly2d(composite, kx='srtk', ky='se')
line_orb2_n2, = plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.4])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4], fontsize=14)
plt.title('CLORB (ratio-test)')
plt.xlabel('RTK-DGPS Speed (m/s)', fontsize=14)
handles.append(line_orb2_n2)
plt.legend(handles=handles, loc=2)
fig.show()
# Figure: SIFT Variants
print("===============================================================")
# SIFT (cross-check)
fig = plt.figure()
fig.patch.set_facecolor('white')
fig.add_subplot(1,2,1)
handles = []
for t in SIFT_TREATMENTS:
composite = pd.concat([df for trial, df in d_sift[t].iteritems()])
X,Y = poly2d(composite, kx='srtk', ky='se')
line_sift, = plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.xlabel('RTK-DGPS Speed (m/s)', fontsize=14)
plt.axis([1, 5, 0, 0.4])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4], fontsize=14)
plt.title('SIFT (cross-check)')
plt.ylabel('2nd-Order Best-Fit Error (m/s)', fontsize=14)
handles.append(line_sift)
plt.legend(handles=handles, loc=2)
# SIFT (ratio-test)
fig.add_subplot(1,2,2)
handles = []
for t in SIFT_TREATMENTS:
composite = pd.concat([df for trial, df in d_sift_n2[t].iteritems()])
X,Y = poly2d(composite, kx='srtk', ky='se')
line_sift_n2, = plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.xlabel('RTK-DGPS Speed (m/s)', fontsize=14)
plt.axis([1, 5, 0, 0.4])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4], fontsize=14)
plt.title('SIFT (ratio-test)')
handles.append(line_sift_n2)
plt.legend(handles=handles, loc=2)
fig.show()
## Figure 7a. Barchart (by Feature-Detector): RTK-groups vs. RMSE
print("===============================================================")
output = open('rmse.csv', 'w')
    output.write('algorithm,p,rmse,rmse_1,rmse_2,rmse_3,rmse_4\n')  # second column is the mean of the 'p' column, not a frame rate
index = np.arange(NUM_GROUPS) + 1
bar_width = 0.09
opacity = 1.0
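    # Grouped bar chart layout: `index` marks the four 1 m/s speed bins and each
    # algorithm i (1..10) is drawn as a bar of width `bar_width`, offset within its bin.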
fig = plt.figure()
fig.patch.set_facecolor('white')
fig.add_subplot(1,2,1)
plt.axis([1, 5+bar_width, 0, 0.5])
# SURF Variants
t = SURF_TREATMENTS[0]
i = 1
alg = 'SURF_1NN'
composite = pd.concat([df for trial, df in d_usurf[t].iteritems()])
RMSE, groups = rmse_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
RMSE_all, _ = rmse_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
p = np.mean(composite['p'])
vals = [alg, p, RMSE_all[0]] + RMSE + ['\n']
newline = ','.join([str(v) for v in vals])
print newline
output.write(newline)
plt.bar(index+bar_width*i, RMSE, bar_width,
alpha=opacity,
color=COLORS['USURF'],
label='U-SURF')
i = 2
alg = 'SURF_2NN'
composite = pd.concat([df for trial, df in d_usurf_n2[t].iteritems()])
RMSE, groups = rmse_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
RMSE_all, _ = rmse_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
p = np.mean(composite['p'])
vals = [alg, p, RMSE_all[0]] + RMSE + ['\n']
newline = ','.join([str(v) for v in vals])
print newline
output.write(newline)
plt.bar(index+bar_width*i, RMSE, bar_width,
alpha=opacity,
color=COLORS['USURF_N2'],
label='U-SURF (ratio-test)')
i = 3
alg = 'USURF_1NN'
composite = pd.concat([df for trial, df in d_usurfex[t].iteritems()])
RMSE, groups = rmse_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
RMSE_all, _ = rmse_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
p = np.mean(composite['p'])
vals = [alg, p, RMSE_all[0]] + RMSE + ['\n']
newline = ','.join([str(v) for v in vals])
print newline
output.write(newline)
plt.bar(index+bar_width*i, RMSE, bar_width,
alpha=opacity,
color=COLORS['USURFEx'],
label='U-SURF Extended (cross-checking)')
i = 4
alg = 'USURF_2NN'
composite = pd.concat([df for trial, df in d_usurfex_n2[t].iteritems()])
RMSE, groups = rmse_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
RMSE_all, _ = rmse_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
p = np.mean(composite['p'])
vals = [alg, p, RMSE_all[0]] + RMSE + ['\n']
newline = ','.join([str(v) for v in vals])
print newline
output.write(newline)
plt.bar(index+bar_width*i, RMSE, bar_width,
alpha=opacity,
color=COLORS['USURFEx_N2'],
label='U-SURF Extended (ratio-test)')
# ORB Variants
t = ORB_TREATMENTS[0]
i = 5
alg = 'ORB_1NN'
composite = pd.concat([df for trial, df in d_orb_hamming[t].iteritems()])
RMSE, _ = rmse_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
RMSE_all, _ = rmse_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
p = np.mean(composite['p'])
vals = [alg, p, RMSE_all[0]] + RMSE + ['\n']
newline = ','.join([str(v) for v in vals])
print newline
output.write(newline)
plt.bar(index+bar_width*i, RMSE, bar_width,
alpha=opacity,
color=COLORS['ORB_HAMMING'],
label='ORB (cross-checking)')
i = 6
alg = 'ORB_2NN'
composite = pd.concat([df for trial, df in d_orb_hamming_n2[t].iteritems()])
RMSE, _ = rmse_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
RMSE_all, _ = rmse_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
p = np.mean(composite['p'])
vals = [alg, p, RMSE_all[0]] + RMSE + ['\n']
newline = ','.join([str(v) for v in vals])
print newline
output.write(newline)
plt.bar(index+bar_width*i, RMSE, bar_width,
alpha=opacity,
color=COLORS['ORB_HAMMING_N2'],
label='ORB (ratio-test)')
i = 7
alg = 'CLORB_1NN'
composite = pd.concat([df for trial, df in d_orb_hamming2[t].iteritems()])
RMSE, _ = rmse_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
RMSE_all, _ = rmse_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
p = np.mean(composite['p'])
vals = [alg, p, RMSE_all[0]] + RMSE + ['\n']
newline = ','.join([str(v) for v in vals])
print newline
output.write(newline)
plt.bar(index+bar_width*i, RMSE, bar_width,
alpha=opacity,
color=COLORS['ORB_HAMMING2'],
label='CLORB (cross-check)')
i = 8
alg = 'CLORB_2NN'
composite = pd.concat([df for trial, df in d_orb_hamming2_n2[t].iteritems()])
RMSE, _ = rmse_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
RMSE_all, _ = rmse_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
p = np.mean(composite['p'])
vals = [alg, p, RMSE_all[0]] + RMSE + ['\n']
newline = ','.join([str(v) for v in vals])
print newline
output.write(newline)
plt.bar(index+bar_width*i, RMSE, bar_width,
alpha=opacity,
color=COLORS['ORB_HAMMING2_N2'],
label='CLORB (ratio-test)')
# SIFT Variants
t = SIFT_TREATMENTS[0]
i = 9
alg = 'SIFT_1NN'
composite = pd.concat([df for trial, df in d_sift[t].iteritems()])
RMSE, _ = rmse_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
RMSE_all, _ = rmse_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
p = np.mean(composite['p'])
vals = [alg, p, RMSE_all[0]] + RMSE + ['\n']
newline = ','.join([str(v) for v in vals])
print newline
output.write(newline)
plt.bar(index+bar_width*i, RMSE, bar_width,
alpha=opacity,
color=COLORS['SIFT'],
label='SIFT (cross-check)')
i = 10
alg = 'SIFT_2NN'
composite = pd.concat([df for trial, df in d_sift_n2[t].iteritems()])
RMSE, _ = rmse_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
RMSE_all, _ = rmse_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
p = np.mean(composite['p'])
vals = [alg, p, RMSE_all[0]] + RMSE + ['\n']
newline = ','.join([str(v) for v in vals])
print newline
output.write(newline)
plt.bar(index+bar_width*i, RMSE, bar_width,
alpha=opacity,
color=COLORS['SIFT_N2'],
label='SIFT (ratio-test)')
plt.xticks([1.5,2.5,3.5,4.5], ['1.0 - 2.0', '2.0 - 3.0', '3.0 - 4.0', '4.0 - 5.0'], fontsize=14)
plt.yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5], fontsize=14)
plt.ylabel('RMSE (m/s)', fontsize=14)
plt.xlabel('Speed (m/s)', fontsize=14)
fig.show()
output.close()
## Figure 7b. Barchart (by Feature-Detector): RTK-groups vs. 95th
print("===============================================================")
output = open('p95.csv', 'w')
output.write('algorithm,hz,p95,p95_1,p95_2,p95_3,p95_4\n')
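    # Note: this reuses the figure created for Figure 7a above; the 95th-percentile
    # panel becomes its right-hand subplot.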
fig.add_subplot(1,2,2)
plt.axis([1, 5+bar_width, 0, 0.5])
# SURF Variants
t = SURF_TREATMENTS[0]
alg = 'USURF'
i = 1
composite = pd.concat([df for trial, df in d_usurf[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, _ = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
plt.bar(index+bar_width*i, P95, bar_width,
alpha=opacity,
color=COLORS[alg], hatch=HATCHES[alg],
label=ALGORITHM_LABELS[alg]
)
i = 2
alg = 'USURF_N2'
composite = pd.concat([df for trial, df in d_usurf_n2[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, _ = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
plt.bar(index+bar_width*i, P95, bar_width,
alpha=opacity,
color=COLORS[alg], hatch=HATCHES[alg],
label=ALGORITHM_LABELS[alg]
)
i = 3
alg = 'USURFEx'
composite = pd.concat([df for trial, df in d_usurfex[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, _ = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
plt.bar(index+bar_width*i, P95, bar_width,
alpha=opacity,
color=COLORS[alg], hatch=HATCHES[alg],
label=ALGORITHM_LABELS[alg]
)
i = 4
alg = 'USURFEx_N2'
composite = pd.concat([df for trial, df in d_usurfex_n2[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, _ = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
plt.bar(index+bar_width*i, P95, bar_width,
alpha=opacity,
color=COLORS[alg], hatch=HATCHES[alg],
label=ALGORITHM_LABELS[alg]
)
# ORB Variants
t = ORB_TREATMENTS[0]
alg = 'ORB_HAMMING'
i = 5
composite = pd.concat([df for trial, df in d_orb_hamming[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, _ = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
plt.bar(index+bar_width*i, P95, bar_width,
alpha=opacity,
color=COLORS[alg], hatch=HATCHES[alg],
label=ALGORITHM_LABELS[alg]
)
i = 6
alg = 'ORB_HAMMING_N2'
composite = pd.concat([df for trial, df in d_orb_hamming_n2[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, _ = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
plt.bar(index+bar_width*i, P95, bar_width,
alpha=opacity,
color=COLORS[alg], hatch=HATCHES[alg],
label=ALGORITHM_LABELS[alg]
)
i = 7
alg = 'ORB_HAMMING2'
composite = pd.concat([df for trial, df in d_orb_hamming2[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
plt.bar(index+bar_width*i, P95, bar_width,
alpha=opacity,
color=COLORS[alg], hatch=HATCHES[alg],
label=ALGORITHM_LABELS[alg])
i = 8
alg = 'ORB_HAMMING2_N2'
composite = pd.concat([df for trial, df in d_orb_hamming2_n2[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
plt.bar(index+bar_width*i, P95, bar_width,
alpha=opacity,
color=COLORS[alg], hatch=HATCHES[alg],
label=ALGORITHM_LABELS[alg])
# SIFT Variants
t = SIFT_TREATMENTS[0]
i = 9
alg = 'SIFT'
composite = pd.concat([df for trial, df in d_sift[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
plt.bar(index+bar_width*i, P95, bar_width,
alpha=opacity,
color=COLORS[alg], hatch=HATCHES[alg],
label=ALGORITHM_LABELS[alg])
i = 10
alg = 'SIFT_N2'
composite = pd.concat([df for trial, df in d_sift_n2[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS, SPEED_RANGE)
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
plt.bar(index+bar_width*i, P95, bar_width,
alpha=opacity,
color=COLORS[alg], hatch=HATCHES[alg],
label=ALGORITHM_LABELS[alg])
plt.xticks([1.5,2.5,3.5,4.5], ['1.0 - 2.0', '2.0 - 3.0', '3.0 - 4.0', '4.0 - 5.0'], fontsize=14)
plt.yticks([0, 0.1, 0.2, 0.3, 0.4, 0.5], fontsize=14)
plt.ylabel('95th Percentile Error (m/s)', fontsize=14)
plt.xlabel('Speed (m/s)', fontsize=14)
fig.show()
output.close()
## Figure 7c. Lines (by Feature-Detector): RTK-groups vs. 95th
print("===============================================================")
output = open('p95_by_threshold.csv', 'w')
    output.write('algorithm,threshold,hz,p95,p95_1,p95_2,p95_3,p95_4\n')
fig = plt.figure()
fig.patch.set_facecolor('white')
# SURF Variants
handles = []
for t in SURF_TREATMENTS:
alg = 'USURF'
fig.add_subplot(2,2,1)
composite = pd.concat([df for trial, df in d_usurf[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, groups = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS*SUB_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,t,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
line, = plt.plot(groups, P95, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.6])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4, 0.6], fontsize=14)
plt.ylabel('Error (m/s)', fontsize=14)
plt.title('SURF (cross-check)')
handles.append(line)
plt.legend(handles=handles, loc=2)
handles = []
for t in SURF_TREATMENTS:
alg = 'USURF_N2'
fig.add_subplot(2,2,2)
composite = pd.concat([df for trial, df in d_usurf_n2[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, groups = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS*SUB_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,t,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
line, = plt.plot(groups, P95, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.6])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4, 0.6], fontsize=14)
plt.title('SURF (ratio-test)')
handles.append(line)
plt.legend(handles=handles, loc=2)
handles = []
for t in SURF_TREATMENTS:
alg = 'USURFEx'
fig.add_subplot(2,2,3)
composite = pd.concat([df for trial, df in d_usurfex[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, groups = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS*SUB_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,t,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
line, = plt.plot(groups, P95, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.6])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4, 0.6], fontsize=14)
plt.ylabel('Error (m/s)', fontsize=14)
plt.xlabel('Speed (m/s)', fontsize=14)
plt.title('U-SURF (cross-check)')
handles.append(line)
plt.legend(handles=handles, loc=2)
handles = []
for t in SURF_TREATMENTS:
alg = 'USURFEx_N2'
fig.add_subplot(2,2,4)
composite = pd.concat([df for trial, df in d_usurfex_n2[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, groups = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS*SUB_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,t,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
line, = plt.plot(groups, P95, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.6])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4, 0.6], fontsize=14)
plt.xlabel('Speed (m/s)', fontsize=14)
plt.title('U-SURF (ratio-test)')
handles.append(line)
plt.legend(handles=handles, loc=2)
fig.show()
# ORB Variants
fig = plt.figure()
fig.patch.set_facecolor('white')
handles = []
for t in ORB_TREATMENTS:
alg = 'ORB_HAMMING'
fig.add_subplot(2,2,1)
composite = pd.concat([df for trial, df in d_orb_hamming[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, groups = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS*SUB_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,t,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
line, = plt.plot(groups, P95, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.6])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4, 0.6], fontsize=14)
plt.ylabel('Error (m/s)', fontsize=14)
plt.title('ORB (cross-check)')
handles.append(line)
plt.legend(handles=handles, loc=2)
handles = []
for t in ORB_TREATMENTS:
alg = 'ORB_HAMMING_N2'
fig.add_subplot(2,2,2)
composite = pd.concat([df for trial, df in d_orb_hamming_n2[t].iteritems()])
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
P95, groups = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS*SUB_GROUPS, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,t,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
line, = plt.plot(groups, P95, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.6])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4, 0.6], fontsize=14)
plt.title('ORB (ratio-test)')
handles.append(line)
plt.legend(handles=handles, loc=2)
handles = []
for t in ORB_TREATMENTS:
alg = 'ORB_HAMMING2'
fig.add_subplot(2,2,3)
composite = pd.concat([df for trial, df in d_orb_hamming2[t].iteritems()])
P95, groups = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS*SUB_GROUPS, SPEED_RANGE)
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,t,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
line, = plt.plot(groups, P95, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.6])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4, 0.6], fontsize=14)
plt.xlabel('Speed (m/s)', fontsize=14)
plt.ylabel('Error (m/s)', fontsize=14)
plt.title('CLORB (cross-check)')
handles.append(line)
plt.legend(handles=handles, loc=2)
handles = []
for t in ORB_TREATMENTS:
alg = 'ORB_HAMMING2_N2'
fig.add_subplot(2,2,4)
composite = pd.concat([df for trial, df in d_orb_hamming2_n2[t].iteritems()])
P95, groups = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS*SUB_GROUPS, SPEED_RANGE)
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,t,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
line, = plt.plot(groups, P95, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.6])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4, 0.6], fontsize=14)
plt.xlabel('Speed (m/s)', fontsize=14)
plt.title('CLORB (ratio-test)')
handles.append(line)
plt.legend(handles=handles, loc=2)
fig.show()
# SIFT Variants
fig = plt.figure()
fig.patch.set_facecolor('white')
handles = []
for t in SIFT_TREATMENTS:
alg = 'SIFT'
fig.add_subplot(2,2,3)
composite = pd.concat([df for trial, df in d_sift[t].iteritems()])
P95, groups = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS*SUB_GROUPS, SPEED_RANGE)
P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
vals = [alg,t,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
line, = plt.plot(groups, P95, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.6])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4, 0.6], fontsize=14)
plt.ylabel('Error (m/s)', fontsize=14)
plt.xlabel('Speed (m/s)', fontsize=14)
plt.title('SIFT (cross-check)')
handles.append(line)
plt.legend(handles=handles, loc=2)
handles = []
for t in SIFT_TREATMENTS:
alg = 'SIFT_N2'
fig.add_subplot(2,2,4)
composite = pd.concat([df for trial, df in d_sift_n2[t].iteritems()])
P95, groups = p95_by_group(composite['sv'], composite['srtk'], NUM_GROUPS*SUB_GROUPS, SPEED_RANGE)
        P95_all, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
        hz = np.mean(composite['hz'])
        vals = [alg,t,hz,P95_all[0]] + P95 + ['\n']
newline = ','.join([str(v) for v in vals])
output.write(newline)
line, = plt.plot(groups, P95, c='black', linestyle=LINE_TYPES[t], label=t+' (%2.1f Hz)' % np.nanmean(composite['hz']))
plt.axis([1, 5, 0, 0.6])
plt.xticks([1,2,3,4,5], fontsize=14)
plt.yticks([0, 0.2, 0.4, 0.6], fontsize=14)
plt.xlabel('Speed (m/s)', fontsize=14)
plt.title('SIFT (ratio-test)')
handles.append(line)
plt.legend(handles=handles, loc=2)
fig.show()
output.close()
## Figure 7d. Lines (by Feature-Detector): Hz vs. 95th
print("===============================================================")
fig = plt.figure()
fig.patch.set_facecolor('white')
fig.add_subplot(1,1,1)
# SURF Variants
alg = 'USURF'
hz_all = []
P95_all = []
for t in SURF_TREATMENTS:
composite = pd.concat([df for trial, df in d_usurf[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
hz_all.append(hz)
P95_all.append(P95)
plt.scatter(hz_all, P95_all, c=COLORS[alg], marker=MARKERS[alg], s=MARKER_SIZE, label=ALGORITHM_LABELS[alg])
alg = 'USURF_N2'
hz_all = []
P95_all = []
for t in SURF_TREATMENTS:
composite = pd.concat([df for trial, df in d_usurf_n2[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
hz_all.append(hz)
P95_all.append(P95)
plt.scatter(hz_all, P95_all, c=COLORS[alg], marker=MARKERS[alg], s=MARKER_SIZE, label=ALGORITHM_LABELS[alg])
alg = 'USURFEx'
hz_all = []
P95_all = []
for t in SURF_TREATMENTS:
composite = pd.concat([df for trial, df in d_usurfex[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
hz_all.append(hz)
P95_all.append(P95)
plt.scatter(hz_all, P95_all, c=COLORS[alg], marker=MARKERS[alg], s=MARKER_SIZE, label=ALGORITHM_LABELS[alg])
alg = 'USURFEx_N2'
hz_all = []
P95_all = []
for t in SURF_TREATMENTS:
composite = pd.concat([df for trial, df in d_usurfex_n2[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
hz_all.append(hz)
P95_all.append(P95)
plt.scatter(hz_all, P95_all, c=COLORS[alg], marker=MARKERS[alg], s=MARKER_SIZE, label=ALGORITHM_LABELS[alg])
# ORB Variants
alg = 'ORB_HAMMING'
hz_all = []
P95_all = []
for t in ORB_TREATMENTS:
composite = pd.concat([df for trial, df in d_orb_hamming[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
hz_all.append(hz)
P95_all.append(P95)
plt.scatter(hz_all, P95_all, c=COLORS[alg], marker=MARKERS[alg], s=MARKER_SIZE, label=ALGORITHM_LABELS[alg])
alg = 'ORB_HAMMING_N2'
hz_all = []
P95_all = []
for t in ORB_TREATMENTS:
composite = pd.concat([df for trial, df in d_orb_hamming_n2[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
hz_all.append(hz)
P95_all.append(P95)
plt.scatter(hz_all, P95_all, c=COLORS[alg], marker=MARKERS[alg], s=MARKER_SIZE, label=ALGORITHM_LABELS[alg])
alg = 'ORB_HAMMING2'
hz_all = []
P95_all = []
for t in ORB_TREATMENTS:
composite = pd.concat([df for trial, df in d_orb_hamming2[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
hz_all.append(hz)
P95_all.append(P95)
plt.scatter(hz_all, P95_all, c=COLORS[alg], marker=MARKERS[alg], s=MARKER_SIZE, label=ALGORITHM_LABELS[alg])
alg = 'ORB_HAMMING2_N2'
hz_all = []
P95_all = []
for t in ORB_TREATMENTS:
composite = pd.concat([df for trial, df in d_orb_hamming2_n2[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
hz_all.append(hz)
P95_all.append(P95)
plt.scatter(hz_all, P95_all, c=COLORS[alg], marker=MARKERS[alg], s=MARKER_SIZE, label=ALGORITHM_LABELS[alg])
# SIFT Variants
alg = 'SIFT'
hz_all = []
P95_all = []
for t in SIFT_TREATMENTS:
composite = pd.concat([df for trial, df in d_sift[t].iteritems()])
P95, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
hz = np.mean(composite['hz'])
hz_all.append(hz)
P95_all.append(P95)
plt.scatter(hz_all, P95_all, c=COLORS[alg], marker=MARKERS[alg], s=MARKER_SIZE, label=ALGORITHM_LABELS[alg])
alg = 'SIFT_N2'
hz_all = []
P95_all = []
for t in SIFT_TREATMENTS:
composite = pd.concat([df for trial, df in d_sift_n2[t].iteritems()])
        P95, _ = p95_by_group(composite['sv'], composite['srtk'], 1, SPEED_RANGE)
        hz = np.mean(composite['hz'])
        hz_all.append(hz)
P95_all.append(P95)
plt.scatter(hz_all, P95_all, c=COLORS[alg], marker=MARKERS[alg], s=MARKER_SIZE, label=ALGORITHM_LABELS[alg])
plt.axis([0, 60, 0.2, 0.3])
plt.xticks([10,20,30,40,50,60], fontsize=14)
plt.yticks([0.2, 0.22, 0.24, 0.26, 0.28, 0.3], fontsize=14)
plt.xlabel('Processing Rate (Hz)', fontsize=14)
plt.ylabel('95th Percentile Error (m/s)', fontsize=14)
plt.legend(scatterpoints=1, loc=4)
fig.show()
## Figure: Percent Error (By Surface) for a specific algorithm(s)
print("===============================================================")
fig = plt.figure()
fig.patch.set_facecolor('white')
targets = ['ORB_HAMMINGCL']
a = 1.0
# Gravel Subplot
fig.add_subplot(3,2,1)
surface='gravel'
for alg, trials in d_gravel.iteritems(): # Gravel
if alg in targets:
for df in trials:
plt.scatter(df['drtk'], df['d'], c=COLORS[alg], s=1, edgecolors='none', alpha=a)
plt.axis([-2.5, 2.5, -20, 20])
plt.xticks([-2.0, -1.0, 0.0, 1.0, 2.0], fontsize=14)
plt.yticks([-20, 0, 20], fontsize=14)
plt.title(SURFACE_LABELS[surface])
# Asphault subplot
fig.add_subplot(3,2,2)
surface='asphault'
for alg, trials in d_asphault.iteritems(): # Asphault
if alg in targets:
for df in trials:
plt.scatter(df['drtk'], df['d'], c=COLORS[alg], s=1, edgecolors='none', alpha=a)
plt.axis([-2.5, 2.5, -20, 20])
plt.xticks([-2.0, -1.0, 0.0, 1.0, 2.0], fontsize=14)
plt.yticks([-20, 0, 20], fontsize=14)
plt.title(SURFACE_LABELS[surface])
# Grass Subplot
fig.add_subplot(3,2,3)
surface='grass'
for alg, trials in d_grass.iteritems(): # Grass
if alg in targets:
for df in trials:
plt.scatter(df['drtk'], df['d'], c=COLORS[alg], s=1, edgecolors='none', alpha=a)
plt.axis([-2.5, 2.5, -20, 20])
plt.xticks([-2.0, -1.0, 0.0, 1.0, 2.0], fontsize=14)
plt.yticks([-20, 0, 20], fontsize=14)
plt.title(SURFACE_LABELS[surface])
plt.ylabel('Percent Error (%)', fontsize=14)
# Residue Subplot
surface='residue'
fig.add_subplot(3,2,4)
for alg, trials in d_residue.iteritems(): # Residue
if alg in targets:
for df in trials:
plt.scatter(df['drtk'], df['d'], c=COLORS[alg], s=1, edgecolors='none', alpha=a)
plt.axis([-2.5, 2.5, -20, 20])
plt.xticks([-2.0, -1.0, 0.0, 1.0, 2.0], fontsize=14)
plt.yticks([-20, 0, 20], fontsize=14)
plt.title(SURFACE_LABELS[surface])
# Corn Subplot
surface='corn'
fig.add_subplot(3,2,5)
for alg, trials in d_corn.iteritems(): # Corn
if alg in targets:
for df in trials:
plt.scatter(df['drtk'], df['d'], c=COLORS[alg], s=1, edgecolors='none', alpha=a)
plt.axis([-2.5, 2.5, -20, 20])
plt.xticks([-2.0, -1.0, 0.0, 1.0, 2.0], fontsize=14)
plt.yticks([-20, 0, 20], fontsize=14)
plt.title(SURFACE_LABELS[surface])
plt.xlabel(r'Acceleration $\mathregular{(m/s^{2})}$', fontsize=14)
# Hay Subplot
surface='hay'
fig.add_subplot(3,2,6)
for alg, trials in d_hay.iteritems(): # hay
if alg in targets:
for df in trials:
plt.scatter(df['drtk'], df['d'], c=COLORS[alg], s=1, edgecolors='none', alpha=a)
plt.axis([-2.5, 2.5, -20, 20])
plt.xticks([-2.0, -1.0, 0.0, 1.0, 2.0], fontsize=14)
plt.yticks([-20, 0, 20], fontsize=14)
plt.title(SURFACE_LABELS[surface])
plt.xlabel(r'Acceleration $\mathregular{(m/s^{2})}$', fontsize=14)
fig.show()
# Figure (Threshold x Feature-Detector): RTK vs Hz
fig = plt.figure()
fig.patch.set_facecolor('white')
# SURF Variants
for t in SURF_TREATMENTS:
fig.add_subplot(5,2,1)
alg = 'SURF'
composite = pd.concat([df for trial, df in d_usurf[t].iteritems()])
X, Y = poly2d(composite, kx='rtk', ky='hz')
plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t])
plt.axis([1, 5, 0, 50])
plt.title('SURF (cross-check)')
fig.add_subplot(5,2,2)
alg = 'SURF_N2'
composite = pd.concat([df for trial, df in d_usurf_n2[t].iteritems()])
X, Y = poly2d(composite, kx='rtk', ky='hz')
plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t])
plt.axis([1, 5, 0, 50])
plt.title('SURF (ratio-test)')
fig.add_subplot(5,2,3)
alg = 'USURFEx'
composite = pd.concat([df for trial, df in d_usurfex[t].iteritems()])
X, Y = poly2d(composite, kx='rtk', ky='hz')
plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t])
plt.axis([1, 5, 0, 50])
plt.title('U-SURF Extended (cross-check)')
fig.add_subplot(5,2,4)
alg = 'USURFEx_N2'
composite = pd.concat([df for trial, df in d_usurfex_n2[t].iteritems()])
X, Y = poly2d(composite, kx='rtk', ky='hz')
plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t])
plt.axis([1, 5, 0, 50])
plt.title('U-SURF Extended (ratio-test)')
# ORB Variants
for t in ORB_TREATMENTS:
fig.add_subplot(5,2,5)
alg = 'ORB_HAMMING'
composite = pd.concat([df for trial, df in d_orb_hamming[t].iteritems()])
X, Y = poly2d(composite, kx='rtk', ky='hz')
plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t])
plt.axis([1, 5, 0, 50])
plt.title('ORB (cross-check)')
fig.add_subplot(5,2,6)
alg = 'ORB_HAMMING_N2'
composite = pd.concat([df for trial, df in d_orb_hamming_n2[t].iteritems()])
X, Y = poly2d(composite, kx='rtk', ky='hz')
plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t])
plt.axis([1, 5, 0, 50])
plt.title('ORB (ratio-test)')
fig.add_subplot(5,2,7)
alg = 'ORB_HAMMINGCL'
composite = pd.concat([df for trial, df in d_orb_hamming2[t].iteritems()])
X, Y = poly2d(composite, kx='rtk', ky='hz')
plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t])
plt.axis([1, 5, 0, 50])
plt.title('CLORB (cross-check)')
fig.add_subplot(5,2,8)
alg = 'ORB_HAMMINGCL_N2'
composite = pd.concat([df for trial, df in d_orb_hamming2_n2[t].iteritems()])
X, Y = poly2d(composite, kx='rtk', ky='hz')
plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t])
plt.axis([1, 5, 0, 50])
plt.title('CLORB (ratio-test)')
# SIFT Variants
for t in SIFT_TREATMENTS:
fig.add_subplot(5,2,9)
alg = 'SIFT'
composite = pd.concat([df for trial, df in d_sift[t].iteritems()])
X, Y = poly2d(composite, kx='rtk', ky='hz')
plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t])
plt.axis([1, 5, 0, 50])
plt.title('SIFT (cross-check)')
fig.add_subplot(5,2,10)
alg = 'SIFT_N2'
composite = pd.concat([df for trial, df in d_sift_n2[t].iteritems()])
X, Y = poly2d(composite, kx='rtk', ky='hz')
plt.plot(X, Y, c='black', linestyle=LINE_TYPES[t])
plt.axis([1, 5, 0, 50])
plt.title('SIFT (ratio-test)')
fig.show()
# Figure (by Feature-Detector): Hz vs N-features (best-of)
print("===============================================================")
fig = plt.figure()
fig.patch.set_facecolor('white')
fig.add_subplot(1,1,1)
# SURF Variants
t = SURF_TREATMENTS[0]
for trial, df in d_usurf[t].iteritems():
plt.scatter(df['p'], df['hz'], c=COLORS['USURF'], s=1, edgecolors='none', alpha=0.2)
for trial, df in d_usurf_n2[t].iteritems():
plt.scatter(df['p'], df['hz'], c=COLORS['USURF_N2'], s=1, edgecolors='none', alpha=0.2)
for trial, df in d_usurfex[t].iteritems():
plt.scatter(df['p'], df['hz'], c=COLORS['USURFEx'], s=1, edgecolors='none', alpha=0.2)
for trial, df in d_usurfex_n2[t].iteritems():
plt.scatter(df['p'], df['hz'], c=COLORS['USURFEx_N2'], s=1, edgecolors='none', alpha=0.2)
# ORB Variants
t = ORB_TREATMENTS[0]
for trial, df in d_orb_hamming[t].iteritems():
plt.scatter(df['p'], df['hz'], c=COLORS['ORB_HAMMING'], s=1, edgecolors='none', alpha=0.2)
for trial, df in d_orb_hamming_n2[t].iteritems():
plt.scatter(df['p'], df['hz'], c=COLORS['ORB_HAMMING_N2'], s=1, edgecolors='none', alpha=0.2)
for trial, df in d_orb_hamming2[t].iteritems():
plt.scatter(df['p'], df['hz'], c=COLORS['ORB_HAMMING2'], s=1, edgecolors='none', alpha=0.2)
for trial, df in d_orb_hamming2_n2[t].iteritems():
plt.scatter(df['p'], df['hz'], c=COLORS['ORB_HAMMING2_N2'], s=1, edgecolors='none', alpha=0.2)
# SIFT Variants
t = SIFT_TREATMENTS[0]
for trial, df in d_sift[t].iteritems():
plt.scatter(df['p'], df['hz'], c=COLORS['SIFT'], s=1, edgecolors='none', alpha=0.2)
for trial, df in d_sift_n2[t].iteritems():
plt.scatter(df['p'], df['hz'], c=COLORS['SIFT_N2'], s=1, edgecolors='none', alpha=0.2)
plt.axis([0, 500, 0, 60])
plt.ylabel('Processing Rate (Hz)', fontsize = 14)
plt.yticks([0,20,40,60], fontsize = 14)
plt.xlabel('Valid Matches', fontsize = 14)
plt.xticks([0,250,500], fontsize = 14)
#plt.legend(handles=LEGEND)
fig.show()
## Plot all and wait...
plt.waitforbuttonpress()
|
[
"tpstanhope@gmail.com"
] |
tpstanhope@gmail.com
|
fd36dc67de161b031676e44b670298e4f4402057
|
fd6908925e3444b6e42ef6436a8a6d95efa4de53
|
/tests/test_shapelet.py
|
7a8197d38653e755a4beb45e34dd58b3de6bfa8a
|
[
"MIT"
] |
permissive
|
krzjoa/sciquence
|
4af82253242c103aedefb6a34bf0f5eaa549970e
|
6a5f758c757200fffeb0fdc9206462f1f89e2444
|
refs/heads/master
| 2021-11-24T14:48:01.326025
| 2021-11-15T08:11:39
| 2021-11-15T08:11:39
| 77,182,514
| 8
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 119
|
py
|
import unittest
import numpy as np
from sciquence.sequences import *
class TestShapelets(unittest.TestCase):
pass
|
[
"kjoa.92@gmail.com"
] |
kjoa.92@gmail.com
|
5cc7ccf096ca031302897a48b3eaacddf5f1e01a
|
ab891104ef1d067e545cc317b4cdac528c1839ca
|
/classify/korea_classify.py
|
7c151f0d1d3a227cbb7267b5833d2a2ea5930ba2
|
[
"MIT"
] |
permissive
|
Lvious/tweet_search
|
93fd7f9655ddd4186b0b12e3700d657a42260db5
|
de61aa0c567e8d1857593e07cfae1eb6083731cd
|
refs/heads/master
| 2021-09-09T23:23:05.101323
| 2018-03-20T07:58:33
| 2018-03-20T07:58:33
| 113,951,863
| 1
| 0
| null | 2017-12-12T06:24:00
| 2017-12-12T06:24:00
| null |
UTF-8
|
Python
| false
| false
| 2,062
|
py
|
import os
import re
import time
import json
from datetime import datetime
from tqdm import tqdm
import codecs
import numpy as np
import subprocess
import multiprocessing
#from multiprocessing import Pool
from multiprocessing.dummy import Pool
from pprint import pprint
import pymongo
from pymongo import InsertOne, DeleteMany, ReplaceOne, UpdateOne
from Config import get_spider_config
_,db,r = get_spider_config()
re_prob = re.compile('(?:__label__(\d)\s([^_]+)[\s]*)')
def batch_ftpredict(texts):
if type(texts) != list:
texts = [texts]
pid = os.getpid()
tmf = '/home/ubuntu/work/'+str(pid)+'.txt'
with codecs.open(tmf,'w','utf-8') as f:
for line in texts:
f.write(line+'\n')
f.close()
p=subprocess.Popen(['/home/ubuntu/work/fastText-0.1.0/fasttext',
'predict-prob',
'/home/ubuntu/work/korea.bin',
tmf,
'2'],
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result,error = p.communicate()
#print result,error
probs = []
for line in result.splitlines():
prob = re_prob.findall(line)
prob = sorted(prob,key=lambda item:item[0])
prob = [float(i[1]) for i in prob]
probs.append(prob)
probs_dict = []
for prob in probs:
probs_dict.append(dict(zip(['0','1'],prob)))
return probs_dict
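# Illustrative note (an assumption about fastText's predict-prob output, not verified here):
# each output line is expected to look like "__label__1 0.83 __label__0 0.17", which re_prob
# parses into [('1', '0.83 '), ('0', '0.17')] before the probabilities are sorted by label
# and zipped into {'0': ..., '1': ...} dicts above.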
def classify():
query = db.korea.find({'class':None},{'_id':1,'tweet.raw_text':1})
ids = []
texts = []
for i in query:
ids.append(i['_id'])
texts.append(i['tweet']['raw_text'])
if len(ids) == 0:
return None
probs = batch_ftpredict(texts)
requests = [UpdateOne({'_id': _id,'class':None}, {'$set': {'class':probs[index]}}) for index,_id in tqdm(enumerate(ids))]
result = db.korea.bulk_write(requests)
pprint(result.bulk_api_result)
if __name__ == '__main__':
print 'classify_worker start!'
while True:
queue = r.lpop('task:classify')
if queue:
print 'classify_worker process!'
classify()
message = json.loads(queue)
print message
if message['is_last']:
r.rpush('task:clustering',json.dumps(message))
print 'classify_worker wait!'
time.sleep(1)
|
[
"lvxq777@gmail.com"
] |
lvxq777@gmail.com
|
723147bf3087a5c27225f13150bea61aae4f8dd8
|
5f07ba47b8e7329431936b9300f9f0cde4f00b1f
|
/packager/core/repo_tools.py
|
5809b796703f6168c3a55a5413c3eff659716d31
|
[
"MIT"
] |
permissive
|
csdms/packagebuilder
|
426653434d9e11501f6ea22202bf327e1d44b74e
|
a72f1d264d9219acfb422864fbcd57dfd6cfd51b
|
refs/heads/master
| 2021-01-25T03:48:14.837344
| 2014-12-17T17:00:11
| 2014-12-17T17:00:11
| 23,443,218
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,938
|
py
|
import os
import shutil
import urllib
import zipfile
import tempfile
def download(repo, dest="."):
'''
Downloads a zip archive of the given repository to the specified
(default is current) directory.
'''
url = "https://github.com/{0}/archive/master.zip".format(repo)
local_file = os.path.join(dest, os.path.basename(repo) + ".zip")
urllib.urlretrieve(url, local_file)
return local_file
def unpack(fname, dest="."):
'''
Unpacks a zip archive containing the contents of the repo to the
specified (default is current) directory.
'''
z = zipfile.ZipFile(fname, mode='r')
z.extractall(dest)
files = z.namelist()
prefix = os.path.commonprefix(files)
return os.path.join(dest, prefix)
def read(fname):
'''
Reads a list of items, as strings, from a text file.
'''
with open(fname, "r") as f:
items = f.read().split("\n")
items.pop(0) # remove first and
items.pop() # last items from list
return items
def get_module(module_name, dest="."):
'''
Downloads a set of repositories and attempts to locate the directory
containing the setup files for the given module. If found, the directory
path is returned.
'''
repo_file = os.path.join(os.path.dirname(__file__), \
"..", "repositories.txt")
repos = read(repo_file)
for r in repos:
zip_file = download(r, dest)
unpack_dir = unpack(zip_file, dest)
module_dir = os.path.join(unpack_dir, module_name, "")
if os.path.isdir(module_dir):
return module_dir
return None
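# Illustrative usage (the module name below is hypothetical, not taken from repositories.txt):
#   module_dir = get_module("hydrotrend", dest="/tmp")
#   if module_dir is not None:
#       print(module_dir)  # directory containing the module's setup files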
def main():
repo = "csdms/rpm_models"
tmp_dir = tempfile.mkdtemp(prefix=main.__module__)
try:
zip_file = download(repo, dest=tmp_dir)
unpack(zip_file, dest=tmp_dir)
except Exception:
raise
finally:
shutil.rmtree(tmp_dir)
if __name__ == '__main__':
main()
|
[
"mark.piper@colorado.edu"
] |
mark.piper@colorado.edu
|
bb23db2e8bfd75b8899d1c4da704570fab3d4381
|
4cd6124d000833ef9f544fcac90b585b2ae50e16
|
/map.py
|
816c92469f87d12421edc4393ebae977ec54de80
|
[] |
no_license
|
zoeab12/CM1101_4
|
042f5bc79e1d932b5a5ecab472d05d4c69315fb8
|
710b1653b6a5eaa5a88004e6a41803e97b69a61d
|
refs/heads/master
| 2020-04-01T21:17:46.839654
| 2018-10-18T15:46:11
| 2018-10-18T15:46:11
| 153,648,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,794
|
py
|
room_reception = {
"name": "Reception",
"description":
"""You are in a maze of twisty little passages, all alike.
Next to you is the School of Computer Science and
Informatics reception. The receptionist, Matt Strangis,
seems to be playing an old school text-based adventure
game on his computer. There are corridors leading to the
south and east. The exit is to the west.""",
"exits": {"south": "Admins","east": "Tutor", "west": "Parking"}
}
room_admins = {
"name": "MJ and Simon's room",
"description":
"""You are leaning agains the door of the systems managers'
room. Inside you notice Matt "MJ" John and Simon Jones. They
ignore you. To the north is the reception.""",
"exits": {"north": "Reception"}
}
room_tutor = {
"name": "your personal tutor's office",
"description":
"""You are in your personal tutor's office. He intently
stares at his huge monitor, ignoring you completely.
On the desk you notice a cup of coffee and an empty
pack of biscuits. The reception is to the west.""",
"exits": {"west": "Reception"}
}
room_parking = {
"name": "the parking lot",
"description":
"""You are standing in the Queen's Buildings parking lot.
You can go south to the COMSC reception, or east to the
general office.""",
"exits": {"east": "Office", "south": "Reception"}
}
room_office = {
"name": "the general office",
"description":
"""You are standing next to the cashier's till at
30-36 Newport Road. The cashier looks at you with hope
in their eyes. If you go west you can return to the
Queen's Buildings.""",
"exits": {"west": "Parking"}
}
rooms = {
"Reception": room_reception,
"Admins": room_admins,
"Tutor": room_tutor,
"Parking": room_parking,
"Office": room_office
}
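# Illustrative lookups (reading the dictionaries defined above, no new data):
#   rooms["Reception"]["exits"]["south"]                      -> "Admins"
#   rooms[rooms["Reception"]["exits"]["south"]]["name"]       -> "MJ and Simon's room"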
|
[
"zoe.bessant@hotmail.co.uk"
] |
zoe.bessant@hotmail.co.uk
|
6ecd59fad77cc9037c9fe14c0cd32ff615282066
|
80f292ba120a2d3f196a46f0de5c93c277124421
|
/game_play.py
|
c8a617d45b63034b091d6c294cda7c473d02addf
|
[] |
no_license
|
ack8006/DomYon
|
6debf552b4ff97287e3ff2b297e622e3815dedd9
|
bcb4633cff94c20ca6ce6a0de1d9f1b800bbff0f
|
refs/heads/master
| 2021-01-12T08:46:39.579109
| 2016-12-22T18:47:21
| 2016-12-22T18:47:21
| 76,689,650
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,073
|
py
|
__author__ = 'stevenkerr'
import card_classes
from player_one_turn import take_turn
from random import shuffle
bank = card_classes.bank
trash = card_classes.trash
name_to_inst_dict = card_classes.name_to_inst_dict
class Player:
def __init__(self):
self.deck = []
self.hand = []
self.discard = []
self.played_actions = []
def print_list_name(self, n_list):
n_list_print = []
for x in range (0,len(n_list)):
n_list_print.append(n_list[x].name)
print n_list_print
def shuffle_discards(self):
self.deck.extend(self.discard)
shuffle(self.deck)
self.discard = []
def draw_cards(self, num):
for x in range(0,num):
if len(self.deck) == 0:
if len(self.discard) ==0:
break
self.shuffle_discards()
card_type = self.deck.pop(0)
self.hand.append(card_type)
def victory_count(self):
count = 0
for x in range(0,len(self.deck)):
victory_points = self.deck[x].victory
count = victory_points + count
for x in range(0,len(self.discard)):
victory_points = self.discard[x].victory
count = victory_points + count
for x in range(0,len(self.hand)):
victory_points = self.hand[x].victory
count += victory_points
return count
def check_player_for_card_type(self,card_type):
in_hand = self.check_for_card_type(self.hand,card_type)
in_deck = self.check_for_card_type(self.deck,card_type)
in_discard = self.check_for_card_type(self.discard,card_type)
in_played_actions = self.check_for_card_type(self.played_actions,card_type)
total = in_deck + in_discard + in_hand + in_played_actions
return total
def check_for_card_type(self,list,looking_for):
number_found = 0
for x in range(0,len(list)):
if list[x].name == looking_for:
number_found += 1
return number_found
def index_of_card_type(self,list,looking_for):
for x in range(0,len(list)):
if list[x].name == looking_for:
index = x
return index
def discard_hand(self):
self.discard.extend(self.hand)
self.hand = []
def gain_card(self,ctg,spending_max):
if bank[ctg.name] > 0 and ctg.cost <= spending_max:
bank[ctg.name] -= 1
self.discard.append(ctg)
elif bank[ctg.name] <= 0:
print "There are no more ", ctg.name, " remaining in the bank"
else:
print "Insufficient funds"
def discard_played_actions(self):
self.discard.extend(self.played_actions)
self.played_actions = []
def trash_card(self,list,card):
index = self.index_of_card_type(list,card)
list.pop(index)
trash[card] += 1
class Game_result():
def __init__(self):
self.player_one_turns = 0
self.player_two_turns = 0
self.player_one_points = 0
self.player_two_points = 0
def print_one_result(self):
print "Player one turns = ", self.player_one_turns
print "Player two turns = ", self.player_two_turns
print "Player one points = ", self.player_one_points
print "Player two points = ", self.player_two_points
def determine_winner(self):
if self.player_one_points > self.player_two_points:
winner = 'Player 1'
elif self.player_two_points > self.player_one_points:
winner = 'Player 2'
elif self.player_two_points == self.player_one_points and self.player_one_turns > self.player_two_turns:
winner = 'Player 2'
elif self.player_two_points == self.player_one_points:
winner = 'Tie'
else:
winner = 'Error'
return winner
def sum_winners(self,winner_list):
winner = self.determine_winner()
winner_list[0] +=1
if winner == 'Player 1':
winner_list[1] += 1
elif winner == 'Player 2':
winner_list[2] += 1
elif winner == 'Tie':
winner_list[3] += 1
else:
winner_list[4] += 1
return winner_list
def play_game(player_one, player_two):
player_one_turns = 0
player_two_turns = 0
while check_game_not_over():
if player_one_turns == 0 :
player_one.shuffle_discards()
player_two.shuffle_discards()
player_one.draw_cards(5)
player_two.draw_cards(5)
take_turn(player_one,player_two,1)
player_one_turns += 1
if check_game_not_over():
take_turn(player_two,player_one,2)
player_two_turns += 1
game_result = Game_result()
game_result.player_one_turns = player_one_turns
game_result.player_two_turns = player_two_turns
game_result.player_one_points = player_one.victory_count()
game_result.player_two_points = player_two.victory_count()
print game_result.player_one_points
print game_result.player_two_points
return game_result
def check_game_not_over():
if bank['Province'] > 0 and check_three_pile_finish() < 3:
not_over = True
else:
not_over = False
return not_over
def check_three_pile_finish():
count = 0
for x in range(0,len(card_classes.all_cards_in_play_list)):
if bank[card_classes.all_cards_in_play_list[x]] <= 0:
count += 1
return count
def reset_bank():
for x in range(0,10):
bank[card_classes.kingdom_cards[x]] = 10
bank['Province'] = 8
bank['Duchy'] = 8
bank['Gold'] = 25
bank['Silver'] = 40
bank['Estate'] = 8
bank['Copper'] = 40
return bank
def reset_trash():
for x in range (0,len(card_classes.all_cards_in_play_list)):
trash[card_classes.all_cards_in_play_list[x]] = 0
def new_game(player):
copper = card_classes.Copper()
estate = card_classes.Estate()
player.deck = [copper,copper,copper,copper,copper,copper,copper,estate,estate,estate]
player.hand = []
player.discard = []
player.played_actions = []
return player
def print_winner_list(wl,r):
round_average = (r/2)/wl[0]
print "Total games played ", wl[0]
print "Player 1 Wins ", wl[1]
print "Play 2 Wins ", wl[2]
print "Ties " , wl[3]
print "Round average ", round_average
def play_game_two(num):
player_one = Player()
player_two = Player()
winner_list = [0,0,0,0,0]
rounds = 0
for x in range(0,num):
bank = reset_bank()
trash = reset_trash()
player_one = new_game(player_one)
player_two = new_game(player_two)
result_one = play_game(player_one,player_two)
winner_list = result_one.sum_winners(winner_list)
rounds += result_one.player_one_turns + result_one.player_two_turns
print_winner_list(winner_list,rounds)
print_winner_list(winner_list,rounds)
#How to run a game
play_game_two(50)
|
[
"atakata@tenorcapital.com"
] |
atakata@tenorcapital.com
|
f1f2316f32d38aa71d153d6109b0b0c795679f82
|
0add7953d3e3ce2df9e8265102be39b758579753
|
/built-in/TensorFlow/Official/cv/image_classification/ResNet101_for_TensorFlow/00-access/official/nlp/nhnet/trainer.py
|
e14c05eede937e73e55b2bf3b26dd737301eb2f6
|
[
"Apache-2.0"
] |
permissive
|
Huawei-Ascend/modelzoo
|
ae161c0b4e581f8b62c77251e9204d958c4cf6c4
|
df51ed9c1d6dbde1deef63f2a037a369f8554406
|
refs/heads/master
| 2023-04-08T08:17:40.058206
| 2020-12-07T08:04:57
| 2020-12-07T08:04:57
| 319,219,518
| 1
| 1
|
Apache-2.0
| 2023-03-24T22:22:00
| 2020-12-07T06:01:32
|
Python
|
UTF-8
|
Python
| false
| false
| 8,717
|
py
|
# Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run NHNet model training and eval."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
from six.moves import zip
import tensorflow as tf
from official.modeling.hyperparams import params_dict
from official.nlp.nhnet import evaluation
from official.nlp.nhnet import input_pipeline
from official.nlp.nhnet import models
from official.nlp.nhnet import optimizer
from official.nlp.transformer import metrics as transformer_metrics
from official.utils.misc import distribution_utils
FLAGS = flags.FLAGS
def define_flags():
"""Defines command line flags used by NHNet trainer."""
## Required parameters
flags.DEFINE_enum("mode", "train", ["train", "eval", "train_and_eval"],
"Execution mode.")
flags.DEFINE_string("train_file_pattern", "", "Train file pattern.")
flags.DEFINE_string("eval_file_pattern", "", "Eval file pattern.")
flags.DEFINE_string(
"model_dir", None,
"The output directory where the model checkpoints will be written.")
# Model training specific flags.
flags.DEFINE_enum(
"distribution_strategy", "mirrored", ["tpu", "mirrored"],
"Distribution Strategy type to use for training. `tpu` uses TPUStrategy "
"for running on TPUs, `mirrored` uses GPUs with single host.")
flags.DEFINE_string("tpu", "", "TPU address to connect to.")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_integer("train_steps", 100000, "Max train steps")
flags.DEFINE_integer("eval_steps", 32, "Number of eval steps per run.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 4, "Total batch size for evaluation.")
flags.DEFINE_integer(
"steps_per_loop", 1000,
"Number of steps per graph-mode loop. Only training step "
"happens inside the loop.")
flags.DEFINE_integer("checkpoint_interval", 2000, "Checkpointing interval.")
flags.DEFINE_integer("len_title", 15, "Title length.")
flags.DEFINE_integer("len_passage", 200, "Passage length.")
flags.DEFINE_integer("num_encoder_layers", 12,
"Number of hidden layers of encoder.")
flags.DEFINE_integer("num_decoder_layers", 12,
"Number of hidden layers of decoder.")
flags.DEFINE_string("model_type", "nhnet",
"Model type to choose a model configuration.")
flags.DEFINE_integer(
"num_nhnet_articles", 5,
"Maximum number of articles in NHNet, only used when model_type=nhnet")
flags.DEFINE_string(
"params_override",
default=None,
help=("a YAML/JSON string or a YAML file which specifies additional "
"overrides over the default parameters"))
# pylint: disable=protected-access
class Trainer(tf.keras.Model):
"""A training only model."""
def __init__(self, model, params):
super(Trainer, self).__init__()
self.model = model
self.params = params
self._num_replicas_in_sync = tf.distribute.get_strategy(
).num_replicas_in_sync
def call(self, inputs, mode="train"):
return self.model(inputs, mode)
def train_step(self, inputs):
"""The logic for one training step."""
with tf.GradientTape() as tape:
logits, _, _ = self(inputs, mode="train", training=True)
targets = models.remove_sos_from_seq(inputs["target_ids"],
self.params.pad_token_id)
loss = transformer_metrics.transformer_loss(logits, targets,
self.params.label_smoothing,
self.params.vocab_size)
# Scales the loss, which results in using the average loss across all
# of the replicas for backprop.
scaled_loss = loss / self._num_replicas_in_sync
tvars = self.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
self.optimizer.apply_gradients(list(zip(grads, tvars)))
return {
"training_loss": loss,
"learning_rate": self.optimizer._decayed_lr(var_dtype=tf.float32)
}
class SimpleCheckpoint(tf.keras.callbacks.Callback):
"""Keras callback to save tf.train.Checkpoints."""
def __init__(self, checkpoint_manager):
super(SimpleCheckpoint, self).__init__()
self.checkpoint_manager = checkpoint_manager
def on_epoch_end(self, epoch, logs=None):
step_counter = self.checkpoint_manager._step_counter.numpy()
self.checkpoint_manager.save(checkpoint_number=step_counter)
def train(params, strategy, dataset=None):
"""Runs training."""
if not dataset:
dataset = input_pipeline.get_input_dataset(
FLAGS.train_file_pattern,
FLAGS.train_batch_size,
params,
is_training=True,
strategy=strategy)
with strategy.scope():
model = models.create_model(
FLAGS.model_type, params, init_checkpoint=FLAGS.init_checkpoint)
opt = optimizer.create_optimizer(params)
trainer = Trainer(model, params)
model.global_step = opt.iterations
trainer.compile(
optimizer=opt,
experimental_steps_per_execution=FLAGS.steps_per_loop)
summary_dir = os.path.join(FLAGS.model_dir, "summaries")
summary_callback = tf.keras.callbacks.TensorBoard(
summary_dir, update_freq=max(100, FLAGS.steps_per_loop))
checkpoint = tf.train.Checkpoint(model=model, optimizer=opt)
checkpoint_manager = tf.train.CheckpointManager(
checkpoint,
directory=FLAGS.model_dir,
max_to_keep=10,
step_counter=model.global_step,
checkpoint_interval=FLAGS.checkpoint_interval)
if checkpoint_manager.restore_or_initialize():
logging.info("Training restored from the checkpoints in: %s",
FLAGS.model_dir)
checkpoint_callback = SimpleCheckpoint(checkpoint_manager)
# Trains the model.
steps_per_epoch = min(FLAGS.train_steps, FLAGS.checkpoint_interval)
epochs = FLAGS.train_steps // steps_per_epoch
trainer.fit(
x=dataset,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[summary_callback, checkpoint_callback],
verbose=2)
def run():
"""Runs NHNet using Keras APIs."""
strategy = distribution_utils.get_distribution_strategy(
distribution_strategy=FLAGS.distribution_strategy, tpu_address=FLAGS.tpu)
if strategy:
logging.info("***** Number of cores used : %d",
strategy.num_replicas_in_sync)
params = models.get_model_params(FLAGS.model_type)
params = params_dict.override_params_dict(
params, FLAGS.params_override, is_strict=True)
params.override(
{
"len_title":
FLAGS.len_title,
"len_passage":
FLAGS.len_passage,
"num_hidden_layers":
FLAGS.num_encoder_layers,
"num_decoder_layers":
FLAGS.num_decoder_layers,
"passage_list":
[chr(ord("b") + i) for i in range(FLAGS.num_nhnet_articles)],
},
is_strict=False)
stats = {}
if "train" in FLAGS.mode:
train(params, strategy)
if "eval" in FLAGS.mode:
timeout = 0 if FLAGS.mode == "train_and_eval" else 3000
# Uses padded decoding for TPU. Always uses cache.
padded_decode = isinstance(strategy, tf.distribute.experimental.TPUStrategy)
params.override({
"padded_decode": padded_decode,
}, is_strict=False)
stats = evaluation.continuous_eval(
strategy,
params,
model_type=FLAGS.model_type,
eval_file_pattern=FLAGS.eval_file_pattern,
batch_size=FLAGS.eval_batch_size,
eval_steps=FLAGS.eval_steps,
model_dir=FLAGS.model_dir,
timeout=timeout)
return stats
def main(_):
stats = run()
if stats:
logging.info("Stats:\n%s", stats)
if __name__ == "__main__":
define_flags()
app.run(main)
|
[
"1571856591@qq.com"
] |
1571856591@qq.com
|
0d7c1333fa626757d72485a2fd1fadb8b98f1b62
|
0857674730d5184747109c07d92b917ff4b16f93
|
/utils/__init__.py
|
4866c0fb332c45cdf33d11a7545edd94f6c0ad5f
|
[] |
no_license
|
adrienpillou/Pygame-Boilerplate
|
5cb21b97f29724d91ccf2a234d3f44259fe37672
|
a437ccc726405b2d11915fdfe47050b94d18f1eb
|
refs/heads/main
| 2023-01-13T10:29:41.006990
| 2020-11-23T09:03:19
| 2020-11-23T09:03:19
| 314,819,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 65
|
py
|
# __init__.py file
# Import your custom objects & classes here
|
[
"noreply@github.com"
] |
adrienpillou.noreply@github.com
|
770c94cb7aaedef112034ae2eff84c43d722c6f4
|
773a696eb15a0adcd6599ce3739dd13b1bbe81c3
|
/hood/forms.py
|
dc012e42cacda72062b54ba848341dbd9761ae86
|
[] |
no_license
|
GabrielSpear/NeighbourHoodW
|
4296a2d78cb17d050ad21039881a9df5b910b924
|
ac2552b0b0b918d9f0b51ba7dc7f5b9d577c4567
|
refs/heads/master
| 2020-03-21T07:24:14.150277
| 2018-06-28T10:33:48
| 2018-06-28T10:33:48
| 138,278,163
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 758
|
py
|
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm
class UserCreateForm(UserCreationForm):
'''
A form class which inherits properties from the user UserCreationForm
'''
class Meta:
'''
This will manually set the fields needed from the UserCreationForm
'''
fields = ('username', 'email', 'password1', 'password2')
model = get_user_model()
def __init__(self, *args, **kwargs):
'''
This will customize the labels on the signup form
to meet my own requirements
'''
super().__init__(*args, **kwargs)
self.fields['username'].label = 'Display Name'
self.fields['email'].label = "Email Address"
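# Illustrative usage in a view (class, URL and template names are assumptions, not part of this app):
#   from django.views.generic import CreateView
#   from django.urls import reverse_lazy
#
#   class SignUpView(CreateView):
#       form_class = UserCreateForm
#       success_url = reverse_lazy('login')
#       template_name = 'hood/signup.html'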
|
[
"gabrieldvjspear@gmail.com"
] |
gabrieldvjspear@gmail.com
|
7ee8e33a03ff992dcff44372bb1302425a589c7a
|
2a57cee9055825ce4f6f1477495c17acc0c2de80
|
/classification/deploy/infer.py
|
1b7701c960d815d6dd536710578dbf5875681ca6
|
[] |
no_license
|
bayer-science-for-a-better-life/beehive
|
60c72ad1d3098f0ea6daa582ee0a660dd6e4fa48
|
1b9a42833bd0dc1b95fe1c1c573d018659967308
|
refs/heads/main
| 2023-04-01T14:20:35.931453
| 2021-04-09T17:59:16
| 2021-04-09T17:59:16
| 356,354,493
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 949
|
py
|
import torch
from models import MajorClasses
def get_batches(n_total, batch_size=100):
'''Get ranges for dividing a set into batches of maximum size
Returns a list of tuples with (start, stop) indexes needed to divide a
list of length n_total into parts of maximum size batch_size (=100 default)
'''
out = []
running_n = n_total
upper = 0
lower = 0
while running_n > 0:
lower = upper
upper = lower + batch_size if running_n > batch_size else lower + running_n
out.append((lower, upper))
running_n += lower - upper
return out
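# Worked example (derived from the loop above):
#   get_batches(250, batch_size=100) -> [(0, 100), (100, 200), (200, 250)]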
def infer_bees(dataset):
major_classes = MajorClasses()
out = []
for part in get_batches(len(dataset)):
ds_part = dataset[part[0]:part[1]]
X = torch.stack([d for d in ds_part])
classes = major_classes(X)
for i in range(len(ds_part)):
out.append(dict(class_caller=classes[i]))
return out
|
[
"schweringmarc01@gmail.com"
] |
schweringmarc01@gmail.com
|
63e865771c8094b013731d783cf2de27cbc2a391
|
2fd0c65aa0f72133f773dac5d9a5c48fe9e26fac
|
/Dsz/PyScripts/Lib/dsz/mca/network/cmd/netmap/types.py
|
89224ef74f6056172fca014f1ce7e4f8069b3978
|
[] |
no_license
|
FingerLeakers/DanderSpritz_docs
|
f5d2430e0b86b1b2f0684f02ddd4fa973a5a7364
|
d96b6a71c039b329f9f81544f645857c75360e7f
|
refs/heads/master
| 2021-01-25T13:05:51.732149
| 2018-03-08T01:22:49
| 2018-03-08T01:22:49
| 123,527,268
| 2
| 0
| null | 2018-03-02T03:48:31
| 2018-03-02T03:48:30
| null |
UTF-8
|
Python
| false
| false
| 1,329
|
py
|
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.10 (default, Feb 6 2017, 23:53:20)
# [GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)]
# Embedded file name: types.py
from types import *
NETMAP_SCOPE_ALL = 0
NETMAP_SCOPE_CONNECTED = 1
NETMAP_SCOPE_REMEMBERED = 2
MSG_KEY_PARAMS = 65536
MSG_KEY_PARAMS_SCOPE = 65537
MSG_KEY_PARAMS_FLAGS = 65538
MSG_KEY_RESULT_MACHINE = 131072
MSG_KEY_RESULT_DATA = 196608
MSG_KEY_RESULT_DATA_FLAGS = 196609
MSG_KEY_RESULT_DATA_SCOPE = 196610
MSG_KEY_RESULT_DATA_TYPE = 196611
MSG_KEY_RESULT_DATA_DISPLAY_TYPE = 196612
MSG_KEY_RESULT_DATA_USAGE = 196613
MSG_KEY_RESULT_DATA_LEVEL = 196614
MSG_KEY_RESULT_DATA_REMOTE_NAME = 196615
MSG_KEY_RESULT_DATA_COMMENT = 196616
MSG_KEY_RESULT_DATA_PROVIDER = 196617
MSG_KEY_RESULT_DATA_LOCAL_NAME = 196618
MSG_KEY_RESULT_DATA_PARENT_NAME = 196619
MSG_KEY_RESULT_DATA_ADDRESSES = 196620
MSG_KEY_RESULT_OSINFO = 262144
MSG_KEY_RESULT_OSINFO_PLATFORM_TYPE = 262145
MSG_KEY_RESULT_OSINFO_OS_MAJOR_VERSION = 262146
MSG_KEY_RESULT_OSINFO_OS_MINOR_VERSION = 262147
MSG_KEY_RESULT_OSINFO_SOFTWARE = 262148
MSG_KEY_RESULT_TIME = 327680
MSG_KEY_RESULT_TIME_TIMEOFDAY = 327681
MSG_KEY_RESULT_TIME_TIMEZONE_OFFSET = 327682
MSG_KEY_RESULT_ERROR = 393216
MSG_KEY_RESULT_ERROR_MODULE = 393217
MSG_KEY_RESULT_ERROR_OS = 393218
|
[
"francisck@protonmail.ch"
] |
francisck@protonmail.ch
|
f799fb08166a95706835ba35532d2109210fc65a
|
168becb5071cabbaa67aaa520349ba7052aaa61f
|
/train.py
|
c9036997d7b9cc0790fb60b9c61e8608297850a1
|
[] |
no_license
|
gjtjx/ICNet-tensorflow
|
49d5ac2e5dd5f6e4f9788e01e7fbbfaed4622a93
|
0e53b5aeca8c916975c865dad72bacfa6a0a8f55
|
refs/heads/master
| 2021-08-24T01:32:29.212257
| 2017-12-07T13:21:30
| 2017-12-07T13:21:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,761
|
py
|
"""
This code is based on DrSleep's framework: https://github.com/DrSleep/tensorflow-deeplab-resnet
"""
from __future__ import print_function
import argparse
import os
import sys
import time
import tensorflow as tf
import numpy as np
from model import ICNet_BN
from tools import decode_labels, prepare_label
from image_reader import ImageReader
IMG_MEAN = np.array((103.939, 116.779, 123.68), dtype=np.float32)
DATA_LIST_PATH = '/home/yaaaaa0127/ADEChallengeData2016/list/train_list2.txt'
BATCH_SIZE = 48
IGNORE_LABEL = 0
INPUT_SIZE = '480,480'
LEARNING_RATE = 1e-3
MOMENTUM = 0.9
NUM_CLASSES = 27
NUM_STEPS = 60001
POWER = 0.9
RANDOM_SEED = 1234
WEIGHT_DECAY = 0.0001
PRETRAINED_MODEL = './model/icnet_cityscapes_trainval_90k_bnnomerge.npy'
SNAPSHOT_DIR = './snapshots/'
SAVE_NUM_IMAGES = 4
SAVE_PRED_EVERY = 50
# Loss Function = LAMBDA1 * sub4_loss + LAMBDA2 * sub24_loss + LAMBDA3 * sub124_loss
LAMBDA1 = 0.16
LAMBDA2 = 0.4
LAMBDA3 = 1.0
def get_arguments():
parser = argparse.ArgumentParser(description="ICNet")
parser.add_argument("--data-list", type=str, default=DATA_LIST_PATH,
help="Path to the file listing the images in the dataset.")
parser.add_argument("--batch-size", type=int, default=BATCH_SIZE,
help="Number of images sent to the network in one step.")
parser.add_argument("--ignore-label", type=int, default=IGNORE_LABEL,
help="The index of the label to ignore during the training.")
parser.add_argument("--input-size", type=str, default=INPUT_SIZE,
help="Comma-separated string with height and width of images.")
parser.add_argument("--learning-rate", type=float, default=LEARNING_RATE,
help="Base learning rate for training with polynomial decay.")
parser.add_argument("--momentum", type=float, default=MOMENTUM,
help="Momentum component of the optimiser.")
parser.add_argument("--num-classes", type=int, default=NUM_CLASSES,
help="Number of classes to predict (including background).")
parser.add_argument("--num-steps", type=int, default=NUM_STEPS,
help="Number of training steps.")
parser.add_argument("--random-mirror", action="store_true",
help="Whether to randomly mirror the inputs during the training.")
parser.add_argument("--random-scale", action="store_true",
help="Whether to randomly scale the inputs during the training.")
parser.add_argument("--restore-from", type=str, default=PRETRAINED_MODEL,
help="Where restore model parameters from.")
parser.add_argument("--power", type=float, default=POWER,
help="Decay parameter to compute the learning rate.")
parser.add_argument("--save-pred-every", type=int, default=SAVE_PRED_EVERY,
help="Save summaries and checkpoint every often.")
parser.add_argument("--snapshot-dir", type=str, default=SNAPSHOT_DIR,
help="Where to save snapshots of the model.")
parser.add_argument("--weight-decay", type=float, default=WEIGHT_DECAY,
help="Regularisation parameter for L2-loss.")
parser.add_argument("--update-mean-var", action="store_true",
help="whether to get update_op from tf.Graphic_Keys")
parser.add_argument("--train-beta-gamma", action="store_true",
help="whether to train beta & gamma in bn layer")
return parser.parse_args()
def save(saver, sess, logdir, step):
model_name = 'model.ckpt'
checkpoint_path = os.path.join(logdir, model_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
saver.save(sess, checkpoint_path, global_step=step)
print('The checkpoint has been created.')
def load(saver, sess, ckpt_path):
saver.restore(sess, ckpt_path)
print("Restored model parameters from {}".format(ckpt_path))
def get_mask(gt, num_classes, ignore_label):
less_equal_class = tf.less_equal(gt, num_classes-1)
not_equal_ignore = tf.not_equal(gt, ignore_label)
mask = tf.logical_and(less_equal_class, not_equal_ignore)
indices = tf.squeeze(tf.where(mask), 1)
return indices
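# Illustrative example (toy values): with num_classes=27 and ignore_label=0, a label
# vector [0, 3, 26, 30] keeps only indices [1, 2] -- label 0 is ignored and 30 exceeds
# num_classes - 1, so both are masked out before the loss is computed.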
def create_loss(output, label, num_classes, ignore_label):
raw_pred = tf.reshape(output, [-1, num_classes])
label = prepare_label(label, tf.stack(output.get_shape()[1:3]), num_classes=num_classes, one_hot=False)
label = tf.reshape(label, [-1,])
indices = get_mask(label, num_classes, ignore_label)
gt = tf.cast(tf.gather(label, indices), tf.int32)
pred = tf.gather(raw_pred, indices)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=pred, labels=gt)
reduced_loss = tf.reduce_mean(loss)
return reduced_loss
def main():
"""Create the model and start the training."""
args = get_arguments()
h, w = map(int, args.input_size.split(','))
input_size = (h, w)
coord = tf.train.Coordinator()
with tf.name_scope("create_inputs"):
reader = ImageReader(
' ',
args.data_list,
input_size,
args.random_scale,
args.random_mirror,
args.ignore_label,
IMG_MEAN,
coord)
image_batch, label_batch = reader.dequeue(args.batch_size)
net = ICNet_BN({'data': image_batch}, is_training=True, num_classes=args.num_classes)
sub4_out = net.layers['sub4_out']
sub24_out = net.layers['sub24_out']
sub124_out = net.layers['conv6_cls']
restore_var = tf.global_variables()
all_trainable = [v for v in tf.trainable_variables() if ('beta' not in v.name and 'gamma' not in v.name) or args.train_beta_gamma]
loss_sub4 = create_loss(sub4_out, label_batch, args.num_classes, args.ignore_label)
loss_sub24 = create_loss(sub24_out, label_batch, args.num_classes, args.ignore_label)
loss_sub124 = create_loss(sub124_out, label_batch, args.num_classes, args.ignore_label)
l2_losses = [args.weight_decay * tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'weights' in v.name]
reduced_loss = LAMBDA1 * loss_sub4 + LAMBDA2 * loss_sub24 + LAMBDA3 * loss_sub124 + tf.add_n(l2_losses)
# Using Poly learning rate policy
base_lr = tf.constant(args.learning_rate)
step_ph = tf.placeholder(dtype=tf.float32, shape=())
learning_rate = tf.scalar_mul(base_lr, tf.pow((1 - step_ph / args.num_steps), args.power))
# Gets moving_mean and moving_variance update operations from tf.GraphKeys.UPDATE_OPS
if args.update_mean_var == False:
update_ops = None
else:
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
opt_conv = tf.train.MomentumOptimizer(learning_rate, args.momentum)
grads = tf.gradients(reduced_loss, all_trainable)
train_op = opt_conv.apply_gradients(zip(grads, all_trainable))
# Set up tf session and initialize variables.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
init = tf.global_variables_initializer()
sess.run(init)
# Saver for storing checkpoints of the model.
saver = tf.train.Saver(var_list=tf.global_variables(), max_to_keep=5)
ckpt = tf.train.get_checkpoint_state(args.snapshot_dir)
if ckpt and ckpt.model_checkpoint_path:
loader = tf.train.Saver(var_list=restore_var)
load_step = int(os.path.basename(ckpt.model_checkpoint_path).split('-')[1])
load(loader, sess, ckpt.model_checkpoint_path)
else:
print('Restore from pre-trained model...')
net.load(args.restore_from, sess)
# Start queue threads.
threads = tf.train.start_queue_runners(coord=coord, sess=sess)
# Iterate over training steps.
for step in range(args.num_steps):
start_time = time.time()
feed_dict = {step_ph: step}
if step % args.save_pred_every == 0:
loss_value, loss1, loss2, loss3, _ = sess.run([reduced_loss, loss_sub4, loss_sub24, loss_sub124, train_op], feed_dict=feed_dict)
save(saver, sess, args.snapshot_dir, step)
else:
loss_value, loss1, loss2, loss3, _ = sess.run([reduced_loss, loss_sub4, loss_sub24, loss_sub124, train_op], feed_dict=feed_dict)
duration = time.time() - start_time
print('step {:d} \t total loss = {:.3f}, sub4 = {:.3f}, sub24 = {:.3f}, sub124 = {:.3f} ({:.3f} sec/step)'.format(step, loss_value, loss1, loss2, loss3, duration))
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
main()
|
[
"hellochick@github.com"
] |
hellochick@github.com
|
eae1d899edf021df1fac6a2dc7310d0971589729
|
ffd1aafa2a6eb691f6772189e66b7e4c05994c38
|
/test/testcaptureinteractiverun.py
|
a2a37f3a5ad0725ca43e9ad70a0defdfbd73c276
|
[
"BSD-3-Clause"
] |
permissive
|
lorin/umdinst
|
d051a34a18dcac55c3dcf604bda1926f2d1e57d8
|
5135f4766e02092d786747b7b0f8d9ea98434da6
|
refs/heads/master
| 2020-05-20T04:56:48.581779
| 2015-03-15T01:15:46
| 2015-03-15T01:15:46
| 32,239,305
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,027
|
py
|
import unittest
import sys
import os
import errno
from xml.dom import minidom
sys.path.append('bin')
from umdinst import wrap
from testsuccessfulcompiledata import getfield, timezonecheck, xmlifystring
from testcapturecompile import programcheck
class TestCaptureInteractiveRun(unittest.TestCase):
def setUp(self):
# Create a subdirectory to hold the log file
dirname = 'testcaptureinteractiverun'
self.logfiledir = os.path.abspath(os.path.join('.',dirname))
os.mkdir(self.logfiledir)
self.runprog = '/bin/echo'
programcheck(self.runprog)
def tearDown(self):
# Remove the subject logfile, if it exists
try:
os.unlink(wrap.getlogfilepath(self.logfiledir))
except OSError, e:
if e.errno!=2:
raise e
os.rmdir(self.logfiledir)
def testCapture(self):
logfile = wrap.getlogfilepath(self.logfiledir)
wrap.capture_interactive_run(self.runprog,logex=logfile)
dom = minidom.parse(logfile)
self.assertEquals(dom.documentElement.tagName,'job')
if __name__ == '__main__':
unittest.main()
|
[
"lorinh@57befb38-bc2d-11de-9259-dba4afbb0da9"
] |
lorinh@57befb38-bc2d-11de-9259-dba4afbb0da9
|
9d9f20d6143b1771b42f5338c9a68521d7b36154
|
23a0c46a29e551c662ec91fa3c248a0f717b2688
|
/020_valid_parentheses/Solution.py
|
060d3d0fed2c01af0866d9d713cb28b4c92df193
|
[] |
no_license
|
zcybupt/leetcode
|
429e752f6b0210af7c89afd866bb170062fe27f0
|
d897b493dbc7b23d35be7400266cffcc2735efdd
|
refs/heads/master
| 2023-01-12T20:26:39.874101
| 2020-11-15T07:51:18
| 2020-11-15T07:51:18
| 284,205,571
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 407
|
py
|
class Solution:
def isValid(self, s: str) -> bool:
mapping = {')': '(', ']': '[', '}': '{'}
stack = []
for ch in s:
if stack and stack[-1] == mapping.get(ch):
stack.pop()
continue
stack.append(ch)
return len(stack) == 0
if __name__ == '__main__':
solution = Solution()
print(solution.isValid('()[]{}'))
|
[
"jp2016213431@qmul.ac.uk"
] |
jp2016213431@qmul.ac.uk
|
f05d6e62fb191e2e7fe1eefcf8d36813bcd5d18e
|
4ddb84f7cdbe6e3cd3fce2061e5ce62322900474
|
/271A_beautiful_year.py
|
6d729ab610746da1d405844b9f02061d120e8ae8
|
[] |
no_license
|
nickzuck/Codeforces
|
15ae65754d98e91a1bfe0f50991dca3601f04937
|
a502c62d5e6fd681b44cb1f076db1252cbeac734
|
refs/heads/master
| 2021-05-03T15:19:35.416332
| 2021-03-02T05:02:07
| 2021-03-02T05:02:07
| 44,151,235
| 0
| 0
| null | 2017-12-02T06:37:35
| 2015-10-13T04:08:49
|
C++
|
UTF-8
|
Python
| false
| false
| 122
|
py
|
n = int(raw_input())
n+= 1
while(n < 90000):
s = str(n)
if (len(set(s)) == len(s)):
break ;
else:
n += 1
print n
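# Example (Codeforces 271A): for input 1987 the loop prints 2013, the next year
# whose digits are all distinct.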
|
[
"nikhil1945.singh@gmail.com"
] |
nikhil1945.singh@gmail.com
|
c443cf6c5e003484bfc979dde26da44f6404272c
|
eb74ce2c559966d7c4775fb1f1004752b955e958
|
/country.py
|
aee71a84179d49c36218267849247925130ee84b
|
[] |
no_license
|
Gabrielle1399/Country-Information
|
9c20ec7f37ee1533bb6ad0e51375e2d4fda8ba6c
|
e296246d0216a756bd02975c1d8f53b2fab55be1
|
refs/heads/master
| 2020-04-03T14:35:33.066828
| 2018-10-30T05:05:16
| 2018-10-30T05:05:16
| 155,327,322
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
# A class Country holds the information about a single country.
class Country:
def __init__(self, name, pop, area, continent): # Constructor including four instance variables.
self._name = name
self._population = int(pop)
self._area = float(area)
self._continent = continent
# A method to get country's name.
def getName(self):
return self._name
# A method to get country's population.
def getPopulation(self):
return self._population
# A method to get country's area.
def getArea(self):
return self._area
# A method to get the continent.
def getContinent(self):
return self._continent
# A method to set new population.
def setPopulation(self, pop):
self._population = pop
# A method to set new area.
def setArea(self, area):
self._area = area
# A method to set new continent.
def setContinent(self, continent):
self._continent = continent
# A method to get population density.
def getPopDensity(self, pop, area):
self._density = pop/area
return "%.2f" % self._density
# Generates a string representation for class objects.
def __repr__(self):
return str(self._name) + "(pop: " + str(self._population) + ", size: " + str(self._area) + ") in " + self._continent
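# Illustrative usage (the figures below are made up for the example):
#   canada = Country("Canada", 37000000, 9984670.0, "North America")
#   canada.getPopDensity(canada.getPopulation(), canada.getArea())   -> "3.71"
#   repr(canada) -> "Canada(pop: 37000000, size: 9984670.0) in North America"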
|
[
"noreply@github.com"
] |
Gabrielle1399.noreply@github.com
|
97240c6a429bb34149dc420d426cc169f9241189
|
fd9e958508a6ee288bb99b04f8ab0010968b5f26
|
/bme280.py
|
7a8b7cedaa49e182e718349478cc5939f7e251b8
|
[] |
no_license
|
sleepypioneer/pyladies
|
8374b91c203f37a8790ebd30f9b37327b9327277
|
ff0d8e4da25edc6a83256ae32748f6e2bb12b24f
|
refs/heads/master
| 2020-05-17T20:46:49.646158
| 2019-01-22T18:27:45
| 2019-01-22T18:27:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,014
|
py
|
# Authors: Paul Cunnane 2016, Peter Dahlebrg 2016
#
# This module borrows from the Adafruit BME280 Python library. Original
# Copyright notices are reproduced below.
#
# Those libraries were written for the Raspberry Pi. This modification is
# intended for the MicroPython and esp8266 boards.
#
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
#
# Based on the BMP280 driver with BME280 changes provided by
# David J Taylor, Edinburgh (www.satsignal.eu)
#
# Based on Adafruit_I2C.py created by Kevin Townsend.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
from ustruct import unpack, unpack_from
from array import array
# BME280 default address.
BME280_I2CADDR = 0x76
# Operating Modes
BME280_OSAMPLE_1 = 1
BME280_OSAMPLE_2 = 2
BME280_OSAMPLE_4 = 3
BME280_OSAMPLE_8 = 4
BME280_OSAMPLE_16 = 5
BME280_REGISTER_CONTROL_HUM = 0xF2
BME280_REGISTER_CONTROL = 0xF4
class BME280:
def __init__(self,
mode=BME280_OSAMPLE_1,
address=BME280_I2CADDR,
i2c=None,
**kwargs):
# Check that mode is valid.
if mode not in [BME280_OSAMPLE_1, BME280_OSAMPLE_2, BME280_OSAMPLE_4,
BME280_OSAMPLE_8, BME280_OSAMPLE_16]:
raise ValueError(
'Unexpected mode value {0}. Set mode to one of '
'BME280_OSAMPLE_1, BME280_OSAMPLE_2, BME280_OSAMPLE_4, '
'BME280_OSAMPLE_8, or BME280_OSAMPLE_16.'.format(mode))
self._mode = mode
self.address = address
if i2c is None:
raise ValueError('An I2C object is required.')
self.i2c = i2c
# load calibration data
dig_88_a1 = self.i2c.readfrom_mem(self.address, 0x88, 26)
dig_e1_e7 = self.i2c.readfrom_mem(self.address, 0xE1, 7)
self.dig_T1, self.dig_T2, self.dig_T3, self.dig_P1, \
self.dig_P2, self.dig_P3, self.dig_P4, self.dig_P5, \
self.dig_P6, self.dig_P7, self.dig_P8, self.dig_P9, \
_, self.dig_H1 = unpack("<HhhHhhhhhhhhBB", dig_88_a1)
self.dig_H2, self.dig_H3 = unpack("<hB", dig_e1_e7)
e4_sign = unpack_from("<b", dig_e1_e7, 3)[0]
self.dig_H4 = (e4_sign << 4) | (dig_e1_e7[4] & 0xF)
e6_sign = unpack_from("<b", dig_e1_e7, 5)[0]
self.dig_H5 = (e6_sign << 4) | (dig_e1_e7[4] >> 4)
self.dig_H6 = unpack_from("<b", dig_e1_e7, 6)[0]
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL,
bytearray([0x3F]))
self.t_fine = 0
# temporary data holders which stay allocated
self._l1_barray = bytearray(1)
self._l8_barray = bytearray(8)
self._l3_resultarray = array("i", [0, 0, 0])
def read_raw_data(self, result):
""" Reads the raw (uncompensated) data from the sensor.
Args:
result: array of length 3 or alike where the result will be
stored, in temperature, pressure, humidity order
Returns:
None
"""
self._l1_barray[0] = self._mode
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL_HUM,
self._l1_barray)
self._l1_barray[0] = self._mode << 5 | self._mode << 2 | 1
self.i2c.writeto_mem(self.address, BME280_REGISTER_CONTROL,
self._l1_barray)
sleep_time = 1250 + 2300 * (1 << self._mode)
sleep_time = sleep_time + 2300 * (1 << self._mode) + 575
sleep_time = sleep_time + 2300 * (1 << self._mode) + 575
time.sleep_us(sleep_time) # Wait the required time
# burst readout from 0xF7 to 0xFE, recommended by datasheet
self.i2c.readfrom_mem_into(self.address, 0xF7, self._l8_barray)
readout = self._l8_barray
# pressure(0xF7): ((msb << 16) | (lsb << 8) | xlsb) >> 4
raw_press = ((readout[0] << 16) | (readout[1] << 8) | readout[2]) >> 4
# temperature(0xFA): ((msb << 16) | (lsb << 8) | xlsb) >> 4
raw_temp = ((readout[3] << 16) | (readout[4] << 8) | readout[5]) >> 4
# humidity(0xFD): (msb << 8) | lsb
raw_hum = (readout[6] << 8) | readout[7]
result[0] = raw_temp
result[1] = raw_press
result[2] = raw_hum
def read_compensated_data(self, result=None):
""" Reads the data from the sensor and returns the compensated data.
Args:
result: array of length 3 or alike where the result will be
stored, in temperature, pressure, humidity order. You may use
this to read out the sensor without allocating heap memory
Returns:
array with temperature, pressure, humidity. Will be the one from
the result parameter if not None
"""
self.read_raw_data(self._l3_resultarray)
raw_temp, raw_press, raw_hum = self._l3_resultarray
# temperature
var1 = ((raw_temp >> 3) - (self.dig_T1 << 1)) * (self.dig_T2 >> 11)
var2 = (((((raw_temp >> 4) - self.dig_T1) *
((raw_temp >> 4) - self.dig_T1)) >> 12) * self.dig_T3) >> 14
self.t_fine = var1 + var2
temp = (self.t_fine * 5 + 128) >> 8
# pressure
var1 = self.t_fine - 128000
var2 = var1 * var1 * self.dig_P6
var2 = var2 + ((var1 * self.dig_P5) << 17)
var2 = var2 + (self.dig_P4 << 35)
var1 = (((var1 * var1 * self.dig_P3) >> 8) +
((var1 * self.dig_P2) << 12))
var1 = (((1 << 47) + var1) * self.dig_P1) >> 33
if var1 == 0:
pressure = 0
else:
p = 1048576 - raw_press
p = (((p << 31) - var2) * 3125) // var1
var1 = (self.dig_P9 * (p >> 13) * (p >> 13)) >> 25
var2 = (self.dig_P8 * p) >> 19
pressure = ((p + var1 + var2) >> 8) + (self.dig_P7 << 4)
# humidity
h = self.t_fine - 76800
h = (((((raw_hum << 14) - (self.dig_H4 << 20) -
(self.dig_H5 * h)) + 16384)
>> 15) * (((((((h * self.dig_H6) >> 10) *
(((h * self.dig_H3) >> 11) + 32768)) >> 10) +
2097152) * self.dig_H2 + 8192) >> 14))
h = h - (((((h >> 15) * (h >> 15)) >> 7) * self.dig_H1) >> 4)
h = 0 if h < 0 else h
h = 419430400 if h > 419430400 else h
humidity = h >> 12
if result:
result[0] = temp
result[1] = pressure
result[2] = humidity
return result
return array("i", (temp, pressure, humidity))
@property
def values(self):
""" human readable values """
t, p, h = self.read_compensated_data()
p = p // 256
pi = p // 100
pd = p - pi * 100
hi = h // 1024
hd = h * 100 // 1024 - hi * 100
# return t, pd, hd
return ("{}".format(t / 100), "{}.{:02d}".format(pi, pd),
"{}.{:02d}".format(hi, hd))
|
[
"pedrovazsa@gmail.com"
] |
pedrovazsa@gmail.com
|
64f5032d9149c41eebe5df6c1672f60f946ebab7
|
bb0be98987c0e33bf7a094c94bfa31cdf8e2231d
|
/utils/action.py
|
e3c25bb4a03a76c5b948ceedb1921369988b91f7
|
[] |
permissive
|
whklwhkl/KCFpy
|
2ed767963a3e95819c655366defc2cd4882e1501
|
308344d78415150fc16e69d23cb3432d565f23a2
|
refs/heads/master
| 2020-06-18T03:07:57.864286
| 2020-02-11T08:50:24
| 2020-02-11T08:50:24
| 196,146,163
| 0
| 0
|
MIT
| 2019-07-10T06:34:37
| 2019-07-10T06:34:37
| null |
UTF-8
|
Python
| false
| false
| 1,972
|
py
|
import torch
import cv2
import numpy as np
def cut(img, bbox):
x1, y1, w, h = map(int, bbox)
height, width, _ = img.shape
xc = x1 + w //2
yc = y1 + h // 2
xlength = ylength = min(max(w, h), width, height) // 2
if xc - xlength < 0 and xc + xlength < width - 1:
xx1 = 0
xx2 = xlength*2
elif xc - xlength > 0 and xc + xlength > width - 1:
xx1 = width - 1 - xlength*2
xx2 = width - 1
elif xc - xlength < 0 and xc + xlength > width -1:
xx1 = 0
xx2 = width - 1
else:
xx1 = xc - xlength
xx2 = xc + xlength
if yc - ylength < 0 and yc + ylength < height - 1:
yy1 = 0
yy2 = ylength*2
elif yc - ylength > 0 and yc + ylength > height - 1:
yy1 = height - 1 - ylength*2
yy2 = height - 1
elif yc - ylength < 0 and yc + ylength > height - 1:
yy1 = 0
yy2 = height - 1
else:
yy1 = yc - ylength
yy2 = yc + ylength
return img[yy1:yy2, xx1:xx2, :]
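# Worked example (numbers chosen for illustration): for a 480x640x3 image and
# bbox = (10, 10, 50, 100), the half-side is min(max(50, 100), 640, 480) // 2 = 50,
# the centre is (xc, yc) = (35, 60); the x range clamps to [0, 100] and the y range
# stays at [10, 110], so the function returns img[10:110, 0:100, :], a 100x100 crop.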
def frames2batch(f_imgs):
ids = []
i = 0
for k, v in f_imgs.items():
ids.append(k)
if i == 0:
simg = frames2data(v)
else:
simg = torch.cat((simg, frames2data(v)), 0)
i += 1
return ids, simg.half().cuda()
mean = np.array([104, 117, 128], np.uint8)[None, None]
def frames2data(frames):
simg = []
for i, frame in enumerate(frames):
h, w = frame.shape[:2]
scale_factor = 256 / h
frame = cv2.resize(frame, (int(w * scale_factor + 0.5), int(h * scale_factor + 0.5)))
h1, w1 = frame.shape[:2]
tw = 224
th = 224
x1 = (w1 - tw) // 2
y1 = (h1 - th) // 2
box = np.array([x1, y1, x1 + tw, y1 + th])
frame = frame[y1: y1 + th, x1: x1 + tw, :]
frame -= mean
img = frame.astype(np.float16).transpose([2, 0, 1])
simg.append(img)
ssimg = np.stack(simg)[None]
return ssimg
|
[
"phiiipwong@163.com"
] |
phiiipwong@163.com
|
c1271b587621587983dfe21212516541641bdf8e
|
022825d92396c6d8f10b3fd253245b8a6d435151
|
/ZHEKUN/4_Divide_Day_in_Person.py
|
fdbfa34e4af288f6c364cc2c8485f637779909e8
|
[] |
no_license
|
ZhekunXIONG/Andorra_RNC
|
759e8f1ddf70954d40cdc4a008dfaeb79d38ccf8
|
b38043adc64f3f01949f0cf61ce0b4616215f305
|
refs/heads/master
| 2021-09-10T17:56:00.532946
| 2018-01-10T19:15:25
| 2018-01-10T19:15:25
| 116,999,235
| 0
| 0
| null | 2018-01-10T19:08:00
| 2018-01-10T19:08:00
| null |
UTF-8
|
Python
| false
| false
| 1,145
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sep 25 2017
Go through each day's csv file and produce one csv file for each ID,
for further calculating staypoints of each individual, see "5_Person_Staypoints.py"
@author: Zhekun Xiong
"""
import pandas as pd
import os
import time
import pyproj
import numpy as np
import calendar
import json
import datetime
import csv
# Make a folder to store our output
os.makedirs("outputcsv_date_combined_person")
for filename in os.listdir("outputcsv_date_combined"):
if ".csv" in filename:
date = filename[:-4]
# Make a date sub folder to store our output
os.makedirs("outputcsv_date_combined_person/"+date)
with open("outputcsv_date_combined/"+filename, mode = 'r') as infile:
df = pd.read_csv(infile, sep=';')
# categorize the datapoints by ID, and produce one csv file for each ID
for person, df_id in df.groupby('id'):
df_id.to_csv("outputcsv_date_combined_person/"+date+"/"+person+".csv", sep=";", encoding='utf-8', index=False)
print(filename + "_done")
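# Illustrative sketch (hypothetical in-memory data) of the groupby split performed above:
#
#   df = pd.DataFrame({"id": ["a", "a", "b"], "lat": [42.50, 42.51, 42.49]})
#   for person, df_id in df.groupby("id"):
#       print(person, len(df_id))   # -> "a 2" then "b 1"; each group becomes one csv file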
|
[
"noreply@github.com"
] |
ZhekunXIONG.noreply@github.com
|
18c7196e36b04879420c799c690b3936ca305a0c
|
78f30edb2c8a80d5e6ba5cfb4757b737a5b6017e
|
/src/assets/parseJson.py
|
d22617fbcb0d3d8ea226de779fafeaa2f002c20e
|
[] |
no_license
|
ADFC-Hamburg/adfc-t30-paten-frontend
|
416b869197156b679fcc57faa224fd2097967099
|
e1a08f87c9a9d899334da1309c5ae59dfe290ed8
|
refs/heads/master
| 2023-05-14T09:29:22.517436
| 2021-10-30T06:54:04
| 2021-10-30T06:54:04
| 163,592,768
| 0
| 2
| null | 2023-04-30T03:54:47
| 2018-12-30T14:19:35
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 883
|
py
|
#!/usr/bin/python3
import json
from pprint import pprint
dateien= {
1:'trotzdem_nicht_Tempo30.json',
2:'KeinTempo30ohne.json',
3:'tempo_30_neu.json',
}
data = []
newid = 0
for key, dateiname in dateien.items():
print(key)
f=open(dateiname,"rt")
indata=json.load(f)
f.close()
for ele in indata['features']:
neu = ele['properties']
neu['art']=int(neu['A'])
del(neu['A'])
neu['Strasse']=neu['Straße']
del(neu['Straße'])
try:
del(neu['C'])
except KeyError:
pass
del(neu['Nr.'])
neu['id'] = newid
newid = newid +1
neu['lat'] = ele['geometry']['coordinates'][1]
neu['lon'] = ele['geometry']['coordinates'][0]
neu['tempo30'] = int(key)
data.append(neu)
with open('sozEinr.json', 'w') as fp:
json.dump(data, fp)
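# Sketch of the expected input shape (hypothetical values): each feature's properties are
# flattened, renamed and enriched with id/lat/lon/tempo30 before being dumped to sozEinr.json:
#
#   {"features": [{"properties": {"A": "1", "Straße": "Beispielweg", "Nr.": "3"},
#                  "geometry": {"coordinates": [9.99, 53.55]}}]}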
|
[
"sven@anders-hamburg.de"
] |
sven@anders-hamburg.de
|
edb21e9d75e0ae1423bcf30af98e52b306b20cdb
|
b56477c3d204306639f667d1e150f62278b09374
|
/tbbi_regression/commonUtility.py
|
f55035feb68e9ffe365dec33c188d4790481f1de
|
[] |
no_license
|
cobbxia/e2e
|
5220ec72bc15a7d9e30e9fbb48e3b745a498679c
|
ec28b6167403e02eedd53cc42c5a45876bf08ad9
|
refs/heads/master
| 2021-01-15T11:43:41.915420
| 2017-01-16T03:52:21
| 2017-01-16T03:52:21
| 78,832,934
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 25,295
|
py
|
#!coding=utf8
from gevent.subprocess import Popen, PIPE
from base64 import encodestring
from hmac import new as hmac_new
import subprocess, os, sys, socket, inspect, unittest, urllib, hashlib, time, random, re, traceback
import functools, pickle  # used by LogWrapper and the pickle helpers below
g_debug = False
def do_mysql_cmd(gc,sql):
cmd='''%s -N -e "%s" ''' % (gc.mysql,sql)
gc.log.info(cmd)
r=do_cmd(cmd)
if(r[0]!=0):
gc.log.fatal("cmd:%s\nstdout:%s\nstderr:%s\n" % (cmd,r[1],r[2]))
return 1
try:
cnt=int(r[1].strip()[0])
except IndexError as e:
cnt=0
if cnt>0:
gc.log.info("sql:%s has record in database." % (sql))
return cnt
return 0
#returns 1 if the table is partitioned
def isparted(gc,odps,projectname,tablename):
ret=0
hql='''desc %s.%s;''' % (projectname,tablename)
r=do_odps_filecmd(gc,odps,hql)
#print(r[1])
#print(r[2])
if r[0] == 0:
for line in r[1].split("\n"):
if line.find("Partition Columns") >= 0:
ret=1
return ret
def do_remote_cmd(gc,cmd):
r=(1,"","")
try:
if gc.remote == "yes" and len(gc.executorList) != 0:
executor=getExecutor(gc.executorList,gc)
if executor is None or executor == "":
gc.log.info("no executor found,remote executed error!")
return r
cmd='''ssh %s "%s"'''% (executor,cmd)
gc.log.info("cmd:%s" % (cmd))
r=do_cmd(cmd)
if r[0] == 0 :
gc.log.debug("success:%s" % (cmd))
except Exception as e:
gc.log.fatal("error cmd:%s sql:%s r1:%s r2:%s Exception:%s" % (cmd,sql,r[1],r[2],traceback.format_exc()))
return r
def LogWrapper(function):
@functools.wraps(function)
def wrap_function(*args, **kwargs):
GlobalConf().log.info("%s() begins" % (function.__name__))
ret=function(*args, **kwargs)
GlobalConf().log.info("%s() over" % (function.__name__))
return ret
return wrap_function
class Singleton(object):
def __new__(cls,*args,**kw):
if not hasattr(cls,'_instance'):
orig=super(Singleton,cls)
cls._instance=orig.__new__(cls,*args,**kw)
return cls._instance
def getNonDict(filterfile):
if not os.path.exists(filterfile) or filterfile == "" or filterfile is None: return {}
return pickle.load(open(filterfile,"rb"))
def genNonDict(infile):
outdict={}
outfile=infile.split(".txt")[0]+".pk"
for line in open(infile,"rb"):
#print(line)
#lines=line.strip("\n").split()
#lineLength=len(lines)
#k=lines[0]
#v=lines[1]
outdict[line]=""
pickle.dump(outdict,open(outfile,"wb"))
return outfile
def addpart(tablename,partname,gc,projectname,odps=""):
if odps=="": odps=gc.kodps
addpartSql="use %s;alter table %s add if not exists partition(%s);" % (projectname,tablename,partname)
gc.log.debug("addpartSql:%s" % (addpartSql))
r=do_odps_filecmd(gc,odps,addpartSql)
#Convert partitions that are not gc.currentday into gc.currentday partitions, implementing the partition switch
# select * from partition where part not like '%ds=%' and part not like '%pt=%' and part not like '%dt=%' and part !="" and part is not NULL ;
#Partition names start with prefixes such as ds/dt/pt
#Partitions like 2014-10-01 are rare, no more than 5 of them
def partTransfer(gc,oldpart):
r=getDayPart(gc,oldpart)
partprefix=oldpart.split('=')[0]
oldpart=oldpart.split('=')[1]
gc.log.debug("oldpart:%s " % (oldpart))
newpart=""
if r[0] != 0 or len(oldpart) < len(gc.currentday):
newpart=oldpart
elif oldpart.find('-') >=0:
newpart="%s-%s-%s" % (gc.currentday[0:4],gc.currentday[4:6],gc.currentday[6:8])
elif oldpart.endswith("'"):
newpart="'"+gc.currentday+oldpart[9:]
elif oldpart.endswith("\""):
newpart="\""+gc.currentday+oldpart[9:]
else:
newpart=gc.currentday+oldpart[8:]
newpart=partprefix+"="+newpart
gc.log.debug("newpart:%s " % (newpart))
return newpart
#Compare the dates of two partitions; returns 0 if smaller
def issmaller(curday,today):
curday=curday.replace('-','')
curlen=len(curday)
tolen=len(today)
if curlen >= tolen:
for i in range(tolen):
if curday[i] != today[i]:
return 1
return 0
#Whether the two dates are the same: returns 0 if identical, 1 otherwise
def equal(curday,today):
curday=curday.replace('-','')
curlen=len(curday)
tolen=len(today)
ret=0
if curlen >= tolen:
for i in range(tolen):
if curday[i] != today[i]:
ret=1
else:
ret=1
return ret
#Is the partition date-based?
#Returns 0 if the table is partitioned by day,
#returns 1 if it also has an hourly partition,
#returns 2 for other tables whose partitions are not in a date format,
#returns 3 for yearly tables,
#returns 4 for monthly tables.
#Only daily, hourly and "other" tables are processed; preprocessing showed hourly tables do not actually exist, so only cases 0 and 2 are handled
def getDayPart(gc,mi):
if mi=="" or mi is None:
return (3,None,None)
firstPart=mi.split("/")[0]
gc.log.debug("firstPart:%s" % (firstPart))
firstPart=mi.split("=")[1]
gc.log.debug("firstPart:%s" % (firstPart))
yList=re.split(r'[0-9]{4}',firstPart.replace("-","").strip())
if len(yList)==2 and yList[0] == '' and yList[1] == '':
gc.log.info("part:%s is year part,skip" % (mi))
return (3,None,None)
mList=re.split(r'[0-9]{6}',firstPart.replace("-","").strip())
if len(mList)==2 and mList[0] == '' and mList[1] == '':
gc.log.info("part:%s is month part,skip" % (mi))
return (4,None,None)
la=re.split(r'[0-9]{8}',firstPart.replace("-","").strip())
gc.log.debug("la:%s" % (la))
if len(la) <= 1:
gc.log.info("part:%s never has a day part" % (la))
return (2,None,None)
bdate=firstPart[0:8]
gc.log.debug("bdate:%s" % (bdate))
val=la[1]
if re.search(r'[1-9]{1,6}',val):
gc.log.fatal("has time partition:%s" % (mi))
return (1,bdate,mi)
else:
gc.log.info("day part:%s" % (mi))
return (0,bdate,mi)
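#Illustrative expectations for getDayPart (assuming gc carries a standard logger):
#   getDayPart(gc, "ds=20141001")    -> (0, "20141001", "ds=20141001")    daily partition
#   getDayPart(gc, "ds=2014100112")  -> (1, "20141001", "ds=2014100112")  partition with an hourly suffix
#   getDayPart(gc, "pt=abc")         -> (2, None, None)                   non-date partition
#   getDayPart(gc, "ds=2014")        -> (3, None, None)                   yearly partition
#   getDayPart(gc, "ds=201410")      -> (4, None, None)                   monthly partition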
def getSize(gc,tablename,partname):
r=(1,'','')
size=0
i=0
gc.log.debug("tablename:%s,partname:%s" % (tablename,partname))
while i < gc.retrytimes:
i=i+1
try:
#executor=getExecutor(gc.executorList)
            if os.path.exists(gc.killfile):
gc.log.info("killfile exists,execution over. filename:%s" % (gc.killfile))
break
if partname is not None and partname != "":
cmd='''%s -z %s -q %s ''' % (gc.ddl,tablename,partname)
else:
cmd='''%s -z %s''' % (gc.ddl,tablename)
gc.log.debug("cmd:%s" % (cmd))
r=do_cmd(cmd)
if r[0] == 0:
gc.log.debug("success:%s size:%s" % (cmd,r[1]))
size=int(r[1])
break
elif os.path.exists(gc.killfile):
gc.log.debug("%s exists,exit" % (gc.killfile))
size=-3
break
else:
gc.log.error("error retytimes:%d cmd:%s ,r1:%s " % (i,cmd,r[1]))
except Exception as e:
gc.log.fatal("error cmd:%s r1:%s Exception: %s" % (cmd,r[1],traceback.format_exc()))
return size
def getExecutor(executorList,gc):
if gc.remote == "no":
return ""
if executorList is None or len(executorList)==0:
return ""
rndindex=random.randint(0,len(executorList)-1)
return executorList[rndindex]
def randomStr():
seed = "1234567890"
sa = []
for i in range(8):
sa.append(random.choice(seed))
salt = ''.join(sa)
return salt
def do_odps_cmd(gc,odps,sql,retrytimes=""):
if retrytimes is None or retrytimes=="":
retrytimes=gc.retrytimes
r=(1,'','')
cmd='''%s -e "%s" ''' % (odps,sql)
gc.log.debug(cmd)
i=0
while i < retrytimes:
if os.path.exists(gc.killfile):
gc.log.debug("%s exists,exit" % (gc.killfile))
break
i=i+1
try:
r=do_cmd(cmd)
if r[0] == 0:
gc.log.debug("success:%s" % (cmd))
break
elif i < retrytimes:
gc.log.error("cmd retrytimes:%d error:%s r1:%s r2:%s" % (i,cmd,r[1],r[2]))
except Exception as e:
gc.log.fatal("['error', ('cmd:%s r1:%s r2:%s Exception: %s')]" % (cmd,r[1],r[2],traceback.format_exc()))
return r
def do_odps_filecmd(gc,odps,sql,outfp=subprocess.PIPE,errfp=subprocess.PIPE):
executor = ""
if gc.remote == "yes" and len(gc.executorList) != 0:
executor=getExecutor(gc.executorList,gc)
if executor is None or executor == "":
gc.log.info("no executor found,executed on local matchine")
r=(1,'','')
i=0
gc.log.info("sql:%s,executor:%s" % (sql,executor))
while i < gc.retrytimes:
if os.path.exists(gc.killfile):
gc.log.debug("%s exists,exit" % (gc.killfile))
break
i=i+1
cmdfilename="/tmp/"+randomStr()
open(cmdfilename,"w").write(sql)
cmd='''%s -f %s''' % (odps,cmdfilename)
if executor != "":
scpcmd="scp %s %s:%s" % (cmdfilename,executor,cmdfilename)
gc.log.debug("scpcmd:%s" % (scpcmd))
do_cmd(scpcmd)
cmd='''ssh %s "%s"'''% (executor,cmd)
gc.log.debug("cmd:%s" % (cmd))
try:
#do_cmd(cmd)
r=do_cmd(cmd,outfp,errfp)
if r[0] == 0 :
gc.log.debug("success:%s" % (cmd))
break
#elif not (r[1].find("There is not row need to copy")>=0 or r[2].find(" There is not row need to copy")>=0):
# gc.log.error("sql:%s r1:%s r2:%s" % (sql,r[1],r[2]))
# break
elif i<gc.retrytimes:
gc.log.error("error retytimes:%d cmd:%s ,sql:%s r1:%s r2:%s" % (i,cmd,sql,r[1],r[2]))
except Exception as e:
gc.log.fatal("error cmd:%s sql:%s r1:%s r2:%s Exception:%s" % (cmd,sql,r[1],r[2],traceback.format_exc()))
finally:
rmcmd="rm -rf %s" % (cmdfilename)
if executor != "":
rmcmd='''ssh %s "%s" ''' % (executor,rmcmd)
gc.log.debug("rmcmd:%s" % (rmcmd))
do_cmd(rmcmd)
return r
def getTableList(gc,sql=""):
filterdict=getNonDict(gc.ignorefile)
if gc.tablefile != "" and gc.tablefile is not None and gc.ismysql == "no":
gc.log.info("generate table list from tablefile:%s" % (gc.tablefile))
gc.tableList=[t.replace("\n","") for t in open(gc.tablefile,"r") if t not in filterdict ]
return gc.tableList
gc.log.info("generate table list from mysql")
if sql == "":
#sql="select name from \`table\` t ,partition p where t.id=p.\`table\` and p.flag=100;"
sql="select name from \`table\`;"
cmd='''%s -N -e "%s" ''' % (gc.mysql,sql)
gc.log.info(cmd)
r=do_cmd(cmd)
if(r[0]!=0):
gc.log.fatal("getTableList error!cmd:%s" % (cmd))
print("getTableList error!cmd:%s" % (cmd))
sys.exit(0)
for line in r[1].split("\n"):
if line not in filterdict:
gc.tableList.append(line)
gc.log.info("total table count from mysql :%d" % (len(gc.tableList)))
def getResouceList(gc):
gc.log.info("generate resource list from file")
for resource in open(gc.resourcefile,"r"):
resource=resource.strip("\n")
if resource == "":
continue
gc.resourceList.append(resource)
gc.log.info("resource loaded")
def getExecutorList(hostFilename):
executorList=[]
if not os.path.exists(hostFilename):
return executorList
fp=open(hostFilename,"r")
for executor in fp.readlines():
if executor == "" or executor is None:
continue
executorList.append(executor.rstrip("\n"))
return executorList
def isToday(curday):
today=gettoday()
curday=curday.replace('-','')
curlen=len(curday)
tolen=len(today)
if curlen >= tolen:
for i in range(tolen):
if curday[i] != today[i]:
return 1
return 0
def gettoday():
return time.strftime('%Y%m%d',time.localtime(time.time()))
def printTime():
nowTime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
    print(nowTime)
def now_time():
return time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
def do_cmd(cmd,stdout=subprocess.PIPE, stderr=subprocess.PIPE):
#p = subprocess.Popen(cmd, shell=True,close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p = Popen(cmd, shell=True,close_fds=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = p.communicate()
if stdout != subprocess.PIPE:
print(out)
stdout.write(out)
stderr.write(err)
return p.returncode, out, err
def runHiveCmd(cmd):
res=""
r=do_cmd(cmd)
if re.search(r'ok|OK',r[2].decode('utf-8')):
        res=r[1].decode("utf-8").strip("\n")
return res
def d(karg):
for i in karg:
print(i,karg[i])
def parseResult(column_diff,colorSrc="red",colorDst="green"):
ret_diff='''<!DOCUMENT><html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Koala-log</title>
</head>'''
ret_diff=ret_diff+'<table border="1">'
lineNum=0
itemTotal= 0
for line in column_diff.split("\n"):
itemNum=0
itemFlag=0
if line.strip() == "":
continue
if lineNum==0:
ret_diff=ret_diff+"<tr>"+"".join(['''<th>%s</th>''' % (item) for item in line.split("\t")])+"</tr>"
lineNum=lineNum+1
itemTotal=len(line.split("\t"))
continue
if len(line.split("\t")) != itemTotal:
#ret_diff=ret_diff+"<tr>"+line+"</tr>"
lineNum=lineNum+1
continue
ret_diff=ret_diff+"<tr>"
for item in line.split("\t"):
color="black"
if itemNum == 0:
itemFlag=int(item+"0",2)
elif (1<<(int)(itemNum/2))&itemFlag >0 and itemNum % 2 == 0:
#print(itemFlag,itemNum,(int)(itemNum/2),1<<(int)(itemNum/2),(1<<(int)(itemNum/2))&itemFlag,item)
color=colorSrc
elif (1<<(int)(itemNum/2))&itemFlag >0 and itemNum % 2 != 0:
#print(itemFlag,itemNum,(int)(itemNum/2),1<<(int)(itemNum/2),(1<<(int)(itemNum/2))&itemFlag,item)
color=colorDst
ret_diff=ret_diff+'''<th><p style="color:%s">%s</p></th>''' % (color,item)
itemNum=itemNum+1
ret_diff=ret_diff+"</tr>"
lineNum=lineNum+1
ret_diff=ret_diff+"</table></html>"
return ret_diff
def list2Dict(argList):
retDict={}
for i in range(len(argList)):
k=argList[i].split("=")[0]
v="=".join(argList[i].split("=")[1:])
#print("k:%s\tv:%s" % (k,v))
retDict[k]=v
return retDict
def isNum(a):
if re.match(r"^\-?[\d\.]+$",a):
return 1
else:
return 0
def getComprKey(fieldCompr):
tempComprFieldList = fieldCompr.split(':')
if "key_t2" not in locals():
key_t2=""
if "key_t1" not in locals():
key_t1=""
for singleFieldCompr in tempComprFieldList:
tempKeyT1=singleFieldCompr.split(';')[0]
tempKeyT2=singleFieldCompr.split(';')[1]
if key_t1 == "":
key_t1=tempKeyT1
else:
key_t1=key_t1+";"+tempKeyT1
if key_t2 == "":
key_t2=tempKeyT2
else:
key_t2=key_t2+";"+tempKeyT2
return (key_t1,key_t2)
#def cur_file_dir():
#path=sys.path[0]
#if os.path.isdir(path):
# return path
#elif os.path.isfile(path):
# return os.path.dirname(path)
def cur_file_dir():
path = os.path.realpath(sys.path[0]) # interpreter starter's path
if os.path.isfile(path): # starter is excutable file
path = os.path.dirname(path)
return os.path.abspath(path) # return excutable file's directory
else: # starter is python script
caller_file = inspect.stack()[1][1] # function caller's filename
return os.path.abspath(os.path.dirname(caller_file))# return function caller's file's directory
def getCurRunPosInfo():
try:
raise Exception
except:
exc_info = sys.exc_info()
traceObj = exc_info[2]
frameObj = traceObj.tb_frame
Upframe = frameObj.f_back
return (Upframe.f_code.co_filename, Upframe.f_code.co_name, Upframe.f_lineno)
class REQUEST():
def __init__(self,accessid,accesskey,host):
#self.host = G_ODPS_HOST
self.host = host
self.method = ''
self.path = ''
self.body = ''
self.accessid = accessid
self.accesskey= accesskey
self.headers= {}
        self.memo = '' # description of the test case
def output(self):
if g_debug:
print ("\n========REQUEST========\nhost:%s\nmethod:%s\npath:%s\nheader:%s\nbody:\n%s"\
% (self.host,self.method,self.path,self.headers,self.body) )
def addhead(self,stra):
key,value = stra.split(':')
self.headers[key.lower()] = value.lower()
def genauth(self):
#import hashlib
#import cgi
#import urlparse
xodps = ""
xkey_list = []
xsort_list = []
for key in self.headers:
if key.find("x-odps-") >= 0:
xkey_list.append(key.strip())
xsort_key = sorted(xkey_list)
for xkey in xsort_key:
if self.headers.get(xkey) != None:
xodps = xodps + xkey+":"+self.headers[xkey] +'\n'
if self.path.find("?") > 0:
dict_query = {}
str_query = self.path[self.path.find("?")+1:]
list_query = str_query.split("&")
sort_query = ""
key_list = []
for item in list_query:
key_value = item.split("=")
key_list.append(key_value[0])
if len(key_value) == 2:
dict_query[key_value[0]] = key_value[1]
else:
dict_query[key_value[0]] = ""
sort_key = sorted(key_list)
#print sort_key
for key in sort_key:
if dict_query[key] == "":
sort_query = sort_query + key +"&"
else:
sort_query = sort_query + key +"=" + dict_query[key] +"&"
list_path = self.path[0:self.path.find("?")]
self.path = self.path[0:self.path.find("?")] + "?"+sort_query[:-1]
else:
pass
if len(self.body.strip()) > 0:
#content_md5 = hashlib.md5(self.body).hexdigest()
content_md5 = ""
else:
content_md5 = ""
try:
content_type = self.headers["content-type"].strip()
except:
content_type = ''
#print self.headers
date = self.headers['Date'].strip()
#self.headers['x-odps-date'] = date
#print self.path
path = self.path[self.path.find("/projects"):]
#print "\npath:"+path
string = self.method.strip() + '\n' \
+ content_md5 + '\n' \
+ content_type + '\n' \
+ date + '\n' \
+ xodps \
+ path
# + "x-odps-date:%s" % date + "\n" \
#print ("\nstring:\n[",string,"]\n")
h = hmac_new(
self.accesskey.encode(),
string.encode(),
hashlib.sha1
)
#print string
signature = encodestring(h.digest()).strip()
#print signature
return signature
class RESPONSE():
def __init__(self):
self.headers = {}
self.status = 0
self.reason = ""
self.version = ""
self.body = ""
def output(self):
if g_debug:
print ("\n========RESPONSE========\nstatus:%s\nheaders:%s\nbody:\n%s"\
% (self.status,self.headers,self.body))
class util():
def __init__(self,abc):
self.td = abc
def run(self):
res = RESPONSE()
res.headers.clear()
try:
#print ('host:%s' % (self.td.host))
conn = httplib.HTTPConnection(self.td.host,timeout=10)
#conn.set_debuglevel(1)
#print(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(time.time())))
#self.td.headers["Date"] = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(time.time()))#time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(time.time()))
self.td.headers["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(time.time()))
if self.td.path.find(G_ODPS_EVENT_PATH) < 0:
signature = self.td.genauth()
self.td.headers["Authorization"] = "ODPS " + self.td.accessid + ":" + signature.decode()
#return self.td.headers
#self.td.output()
#print ("com:139 %s" % self.td.path)
conn.request(self.td.method,
self.td.path,
self.td.body,
self.td.headers)
ret = conn.getresponse()
except socket.error as v:
#print v
return None
for head in ret.getheaders():
res.headers[head[0]] = head[1]
res.reason = ret.reason
res.status = ret.status
res.version = ret.version
res.body = ret.read()
#res.output()
conn.close()
return res
class Signer(object):
def __init__(self,access_id,access_key):
self.access_id = access_id
self.access_key= access_key
def gen(self,host):
access_id=self.access_id
access_key=self.access_key
td = REQUEST(access_id,access_key,host)
#td.host = "%s" % G_ODPS_HOST
td.host = "%s" % host
td.method = "GET"
td.path = "%s/projects/" % G_ODPS_PREFIX
td.headers = {}
#print(time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(time.time())))
td.headers["Date"] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
test1 = util(td)
ret = test1.run()
return ret.headers
def uploadLog(filename,gChecklistURL="http://172.24.102.214:8080"):
try:
# curl -F "filename=@cao;type=text/plain" "http://172.24.102.214:8888/uploadLoga"
#print(filename)
filename=urllib.request.quote(filename)
requestUrl='''curl -F "filename=@%s;type=text/plain" "%s/uploadLog"''' % (filename,gChecklistURL)
#print(requestUrl)
r=do_check_cmd(requestUrl)
return r
except Exception as e:
print("uploadLog error:%s" % format(str(e)))
def filterList(fieldList,negFieldList):
retList=[]
for item in fieldList:
if item not in negFieldList:
retList.append(item)
return retList
def alarmWrapper():
ip=do_cmd("hostname -i")[1].decode('utf-8').strip("\n")
retStr="agent ip:%s" % (ip)
wangwang("慕宗",retStr,subject="\"insert webserver failed!\"")
def do_check_cmd(cmd):
r=(1,'','')
try:
r=do_cmd(cmd)
except Exception as e:
print("cmd execute error:%s" % (format(str(e))))
alarmWrapper()
return r
return r
def wangwang(nick,retStr,subject="\"table comparation failed\""):
#curl "http://kelude.taobao.net/api/admin/notice/wangwang?auth=155537aa6e5c65a42e89f3a8c10a6892&nick=慕宗&subject=旺旺通知&context=内容"
#log=olog.getLog("check","debug")
context="\""+retStr+"\""
#log("nick=%s\tsubject=%s\tcontext=%s" % (nick,subject,context))
if nick == "" :
#log("wangwang nick is empty,send it to 慕宗")
nick="慕宗"
baseURL="http://kelude.taobao.net/api/admin/notice/wangwang?auth=155537aa6e5c65a42e89f3a8c10a6892"
requestURL="curl '%s&%s'" % (baseURL,urllib.parse.urlencode({"nick":nick,"subject":subject,"context":context}))
#log("requestURL:%s" % requestURL)
do_cmd(requestURL)
def test():
curday="2014-01-01-11-22-33"
print(isToday(curday))
curday="2014-12-22-00-00-00"
print(isToday(curday))
curday="20141212"
print(isToday(curday))
curday="2014-12-22-01-00-00"
print(isToday(curday))
curday="201412120101"
print(isToday(curday))
sys.exit(0)
## access_id="3ARMp0GSruSLnMwI"
## access_key="bAdvSomgQfxJWcULf7w2AJo2PJ6WRA"
## sign = Signer(access_id,access_key)
# #print ('result: %s' % sign.gen("10.206.120.19")["Authorization"])
# print ('result: %s' % sign.gen("10.206.120.19"))
#filename="/20130601/132cbac1e22a0db2587ddf11802cb4f2a3f238b185ca54fe0df485da.log"
logDirName="/home/taobao/httpd/htdocs"
filename=logDirName+"/20131106/757f8ee6db78008700ebd59bf49e75aebbe4acee4749ba27b025d9c3.log"
uploadLog(filename)
if __name__ == '__main__':
stdout=open("stdout.txt","w")
stderr=open("stdout.txt","w")
cmd = ''' ls ./'''
do_cmd(cmd,stdout=stdout, stderr=stderr)
|
[
"mingchao.xiamc@alibaba-inc.com"
] |
mingchao.xiamc@alibaba-inc.com
|
20a0d38f554f05963b4964a72b1cb873a779711e
|
002c1b2260184342d79f2e6a71344b169f876b0a
|
/emotion_recognition.py
|
535e4cc3cd56b03683b7b9b134e9290d7a5b2276
|
[] |
no_license
|
dhirenkinha/real-time-emotion-detection-
|
e334140704b8e7ab01a23a8710fff5e966da8b19
|
4afdc6bf949c0961d0be13d25aacc410a28435c6
|
refs/heads/main
| 2023-07-19T11:56:42.191995
| 2021-08-21T13:58:46
| 2021-08-21T13:58:46
| 398,568,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,182
|
py
|
import sys, os
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D, BatchNormalization,AveragePooling2D
from keras.losses import categorical_crossentropy
from keras.optimizers import Adam
from keras.regularizers import l2
from keras.utils import np_utils
df=pd.read_csv('fer2013.csv')
# print(df.info())
# print(df["Usage"].value_counts())
# print(df.head())
X_train,train_y,X_test,test_y=[],[],[],[]
for index, row in df.iterrows():
val=row['pixels'].split(" ")
try:
if 'Training' in row['Usage']:
X_train.append(np.array(val,'float32'))
train_y.append(row['emotion'])
elif 'PublicTest' in row['Usage']:
X_test.append(np.array(val,'float32'))
test_y.append(row['emotion'])
except:
print(f"error occured at index :{index} and row:{row}")
num_features = 64
num_labels = 7
batch_size = 64
epochs = 50
width, height = 48, 48
X_train = np.array(X_train,'float32')
train_y = np.array(train_y,'float32')
X_test = np.array(X_test,'float32')
test_y = np.array(test_y,'float32')
train_y=np_utils.to_categorical(train_y, num_classes=num_labels)
test_y=np_utils.to_categorical(test_y, num_classes=num_labels)
#standardize the data: zero mean and unit variance per pixel
X_train -= np.mean(X_train, axis=0)
X_train /= np.std(X_train, axis=0)
X_test -= np.mean(X_test, axis=0)
X_test /= np.std(X_test, axis=0)
X_train = X_train.reshape(X_train.shape[0], 48, 48, 1)
X_test = X_test.reshape(X_test.shape[0], 48, 48, 1)
# print(f"shape:{X_train.shape}")
##designing the cnn
#1st convolution layer
model = Sequential()
model.add(Conv2D(64, kernel_size=(3, 3), activation='relu', input_shape=(X_train.shape[1:])))
model.add(Conv2D(64,kernel_size= (3, 3), activation='relu'))
# model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2), strides=(2, 2)))
model.add(Dropout(0.5))
#2nd convolution layer
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
# model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2), strides=(2, 2)))
model.add(Dropout(0.5))
#3rd convolution layer
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
# model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2,2), strides=(2, 2)))
model.add(Flatten())
#fully connected neural networks
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_labels, activation='softmax'))
# model.summary()
#Compiling the model
model.compile(loss=categorical_crossentropy,
optimizer=Adam(),
metrics=['accuracy'])
#Training the model
model.fit(X_train, train_y,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(X_test, test_y),
shuffle=True)
#Saving the model
fer_json = model.to_json()
with open("fer.json", "w") as json_file:
json_file.write(fer_json)
model.save_weights("fer.h5")
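# Hedged follow-up sketch (not part of the original script): reload the model that was just
# saved above and run a sanity prediction; the random input stands in for a real,
# mean/std-normalized 48x48 face crop.
from keras.models import model_from_json
with open("fer.json", "r") as reloaded_json:
    reloaded = model_from_json(reloaded_json.read())
reloaded.load_weights("fer.h5")
dummy_face = np.random.rand(1, 48, 48, 1).astype('float32')
print(reloaded.predict(dummy_face).argmax(axis=1))  # index of the predicted emotion class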
|
[
"noreply@github.com"
] |
dhirenkinha.noreply@github.com
|
957ae4032fba1d6a7a8ad5bf9eda0c661b983958
|
4a62e36ef5f9cfe1de8c25f259ad8b8841b04525
|
/dktools/hello.py
|
d3661ffebc26d95e10e3db7de4e2d33635f70e9c
|
[
"MIT"
] |
permissive
|
dollarkillerx/pytools
|
dfc5c9c92fea47df69926f612c2d20e1de6b4b96
|
65e1dfe48c6dab6481a721405e8b4b8ca6dd72e9
|
refs/heads/master
| 2020-09-02T16:21:11.172172
| 2019-11-05T00:49:59
| 2019-11-05T00:49:59
| 219,258,113
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 154
|
py
|
# -*- coding: utf-8 -*-
# @Time : 2019-11-03 14:11
# @Author : DollarKillerx
# @Description :
def hello():
print('Hello World! DollarKillerTools')
|
[
"dollarkiller@vip.qq.com"
] |
dollarkiller@vip.qq.com
|
c844f0a0bac6fb22f3436eb2389dc87bb63fc3c4
|
c6342a8ad7ffc02806f54e3aae2e99f21df45fb4
|
/ast/letstar.py
|
0984e6cc82ad7f4c816741923fdfef2586325148
|
[
"MIT"
] |
permissive
|
choleraehyq/yuujins
|
a3c1572bc175a2cdc318798c82df772a5da31f12
|
dff6f7def0081f24afac30a7c1e3ca6755a5ea3f
|
refs/heads/master
| 2021-01-19T21:32:12.799286
| 2017-02-19T14:03:12
| 2017-02-19T14:03:12
| 82,512,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 788
|
py
|
from typing import List
from ast import Node, Name
from scope import Scope
from value import Value
import binder
class LetStar(Node):
def __init__(self, patterns: List[Name], exprs: List[Node], body: Node) -> None:
self.patterns: List[Name] = patterns
self.exprs: List[Node] = exprs
self.body: Node = body
def eval(self, env: Scope) -> Value:
e: Scope = env
for i, _ in enumerate(self.patterns):
e = Scope(e)
binder.define(e, self.patterns[i].identifier, self.exprs[i].eval(e))
return self.body.eval(e)
def __str__(self) -> str:
bindings: str = ' '.join(['({} {})'.format(self.patterns[i], self.exprs[i]) for i in range(len(self.patterns))])
return f'(let* ({bindings}) {self.body})'
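# Illustrative reading (hypothetical Name/Node instances): LetStar([x, y], [one, x_ref], body)
# corresponds to the surface form (let* ((x 1) (y x)) ...), where each binding is evaluated
# in a scope that already sees the previous bindings.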
|
[
"choleraehyq@gmail.com"
] |
choleraehyq@gmail.com
|
d2503b3707e7573fa9adbaf17b49181de71d214f
|
cbf10f49d22fe669225b067a322a0a4ecd7733aa
|
/data/data_loader_multigraph.py
|
f93803f40d8368e9f4907fb2d2193b46b02f8f56
|
[] |
no_license
|
q3erf/GLAM
|
1bc5ecf3bc075615f0e1b0664812483fffe8e576
|
ecc1e7d34daeba22362e2a67844f10167c5d8bfc
|
refs/heads/main
| 2023-08-24T13:26:14.246985
| 2021-10-20T07:06:53
| 2021-10-20T07:06:53
| 416,167,724
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,523
|
py
|
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np
import random
from data.pascal_voc import PascalVOC
from data.willow_obj import WillowObject
from data.SPair71k import SPair71k
from utils.config import cfg
datasets = {"PascalVOC": PascalVOC,
"WillowObject": WillowObject,
"SPair71k": SPair71k}
class GMDataset(Dataset):
def __init__(self, name, length, **args):
self.name = name
self.ds = datasets[name](**args)
self.true_epochs = length is None
self.length = (
self.ds.total_size if self.true_epochs else length
) # NOTE images pairs are sampled randomly, so there is no exact definition of dataset size
if self.true_epochs:
print(f"Initializing {self.ds.sets}-set with all {self.length} examples.")
else:
print(f"Initializing {self.ds.sets}-set. Randomly sampling {self.length} examples.")
# length here represents the iterations between two checkpoints
# if length is None the length is set to the size of the ds
self.obj_size = self.ds.obj_resize
self.classes = self.ds.classes
self.cls = None
self.num_graphs_in_matching_instance = None
def set_cls(self, cls):
if cls == "none":
cls = None
self.cls = cls
if self.true_epochs: # Update length of dataset for dataloader according to class
self.length = self.ds.total_size if cls is None else self.ds.size_by_cls[cls]
def set_num_graphs(self, num_graphs_in_matching_instance):
self.num_graphs_in_matching_instance = num_graphs_in_matching_instance
def __len__(self):
return self.length
def __getitem__(self, idx):
sampling_strategy = cfg.train_sampling if self.ds.sets == "train" else cfg.eval_sampling
if self.num_graphs_in_matching_instance is None:
raise ValueError("Num_graphs has to be set to an integer value.")
idx = idx if self.true_epochs else None
anno_list, perm_mat_list = self.ds.get_k_samples(idx, k=self.num_graphs_in_matching_instance, cls=self.cls, mode=sampling_strategy)
for perm_mat in perm_mat_list:
if (
not perm_mat.size
or (perm_mat.size < 2 * 2 and sampling_strategy == "intersection")
and not self.true_epochs
):
# 'and not self.true_epochs' because we assume all data is valid when sampling a true epoch
next_idx = None if idx is None else idx + 1
return self.__getitem__(next_idx)
points_gt = [np.array([(kp["x"], kp["y"]) for kp in anno_dict["keypoints"]]) for anno_dict in anno_list]
n_points_gt = [len(p_gt) for p_gt in points_gt]
ret_dict = {
"Ps": [torch.Tensor(x) for x in points_gt],
"ns": [torch.tensor(x) for x in n_points_gt],
"gt_perm_mat": perm_mat_list,
# "edges": graph_list,
}
imgs = [anno["image"] for anno in anno_list]
if imgs[0] is not None:
trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize(cfg.NORM_MEANS, cfg.NORM_STD)])
imgs = [trans(img) for img in imgs]
ret_dict["images"] = imgs
elif "feat" in anno_list[0]["keypoints"][0]:
feat_list = [np.stack([kp["feat"] for kp in anno_dict["keypoints"]], axis=-1) for anno_dict in anno_list]
ret_dict["features"] = [torch.Tensor(x) for x in feat_list]
return ret_dict
def collate_fn(data: list):
"""
Create mini-batch data for training.
:param data: data dict
:return: mini-batch
"""
def pad_tensor(inp):
assert type(inp[0]) == torch.Tensor
it = iter(inp)
t = next(it)
max_shape = list(t.shape)
while True:
try:
t = next(it)
for i in range(len(max_shape)):
max_shape[i] = int(max(max_shape[i], t.shape[i]))
except StopIteration:
break
max_shape = np.array(max_shape)
padded_ts = []
for t in inp:
pad_pattern = np.zeros(2 * len(max_shape), dtype=np.int64)
pad_pattern[::-2] = max_shape - np.array(t.shape)
pad_pattern = tuple(pad_pattern.tolist())
padded_ts.append(F.pad(t, pad_pattern, "constant", 0))
return padded_ts
def stack(inp):
if type(inp[0]) == list:
ret = []
for vs in zip(*inp):
ret.append(stack(vs))
elif type(inp[0]) == dict:
ret = {}
for kvs in zip(*[x.items() for x in inp]):
ks, vs = zip(*kvs)
for k in ks:
assert k == ks[0], "Key value mismatch."
ret[k] = stack(vs)
elif type(inp[0]) == torch.Tensor:
new_t = pad_tensor(inp)
ret = torch.stack(new_t, 0)
elif type(inp[0]) == np.ndarray:
new_t = pad_tensor([torch.from_numpy(x) for x in inp])
ret = torch.stack(new_t, 0)
elif type(inp[0]) == str:
ret = inp
# elif type(inp[0]) == Data: # Graph from torch.geometric, create a batch
# ret = Batch.from_data_list(inp)
else:
raise ValueError("Cannot handle type {}".format(type(inp[0])))
return ret
ret = stack(data)
return ret
def worker_init_fix(worker_id):
"""
Init dataloader workers with fixed seed.
"""
random.seed(cfg.RANDOM_SEED + worker_id)
np.random.seed(cfg.RANDOM_SEED + worker_id)
def worker_init_rand(worker_id):
"""
Init dataloader workers with torch.initial_seed().
torch.initial_seed() returns different seeds when called from different dataloader threads.
"""
random.seed(torch.initial_seed())
np.random.seed(torch.initial_seed() % 2 ** 32)
def get_dataloader(dataset, fix_seed=True, shuffle=False):
return torch.utils.data.DataLoader(
dataset,
batch_size=cfg.BATCH_SIZE,
shuffle=shuffle,
num_workers=1,
collate_fn=collate_fn,
pin_memory=False,
worker_init_fn=worker_init_fix if fix_seed else worker_init_rand,
)
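# Minimal usage sketch of collate_fn on a hypothetical toy batch (values are made up):
# tensors with different first dimensions are zero-padded to a common shape and stacked.
if __name__ == "__main__":
    sample_a = {"Ps": [torch.rand(3, 2)]}
    sample_b = {"Ps": [torch.rand(5, 2)]}
    toy_batch = collate_fn([sample_a, sample_b])
    print(toy_batch["Ps"][0].shape)  # torch.Size([2, 5, 2]) after padding the smaller tensor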
|
[
"1451159047@qq.com"
] |
1451159047@qq.com
|
01f4b1da0157f33f4b067d4203993cd2534636c1
|
909670306b1097de1f87a49ebc03e69902c9d508
|
/childcare_check_in/childcare_check_in/urls.py
|
5a2b788d22a2aee7b3ab586001cbd8e06e6bb2ae
|
[] |
no_license
|
buypolarbear/childcare_check_in
|
16d33702a4cf11b2bbd59d29e1cade1807ac3bd8
|
cb0e428d11bdb602984e7863c8c0265050d95952
|
refs/heads/master
| 2020-05-05T04:53:45.633028
| 2016-12-29T16:16:48
| 2016-12-29T16:16:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,025
|
py
|
from django.conf.urls import url, include
from django.contrib import admin
from childcare_app.views import IndexView, UserCreateView, ChildCreateView, CheckCreateView, \
CheckUpdateView, ProfileListView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('django.contrib.auth.urls')),
url(r'^$', IndexView.as_view(), name="index_view"),
url(r'^accounts/profile/$', ProfileListView.as_view(), name="profile_view"),
url(r'^create_user/$', UserCreateView.as_view(), name="user_create_view"),
url(r'^create_child/$', ChildCreateView.as_view(), name="child_create_view"),
url(r'^check_in/(?P<pk>\d+)/$', CheckCreateView.as_view(), name="check_create_view"),
url(r'^check_out/(?P<pk>\d+)/$', CheckUpdateView.as_view(), name="check_update_view"),
# url(r'^child_check_in/(?P<pk>\d+)/$', ChildCreateView.as_view(), name="child_create_view"),
# url(r'^child_check_out/(?P<pk>\d+)/$', ChildUpdateView.as_view(), name="child_update_view"),
]
|
[
"tommyhuynh93@gmail.com"
] |
tommyhuynh93@gmail.com
|
e6b3bb5dd8e86507203bcfaf1ff623d8648ffe3f
|
4bfbc7e4754bfa679d64aab077952c7cdbeaf4be
|
/wagtailimagecaption/views/remover.py
|
6dd7996b9d1e9e2ddf6e22d3a77608e06abb11d3
|
[
"BSD-3-Clause"
] |
permissive
|
springload/wagtailimagecaption
|
c646ac158b16d0a3e1d897fa3ebb976833e19be7
|
7a82cf8466a26c2fae186638c0d613b8c0d342d3
|
refs/heads/master
| 2020-12-11T04:09:37.061248
| 2018-02-27T22:55:04
| 2018-02-27T22:55:04
| 27,413,798
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 923
|
py
|
import json
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import permission_required
from wagtail.wagtailadmin.modal_workflow import render_modal_workflow
from wagtail.wagtailimages.models import Image
@permission_required('wagtailadmin.access_admin')
def imagecaption_remove(request, image_id):
    print(image_id)
try:
image = Image.objects.get(id=image_id)
if image.imagecaption.count() > 0:
imagecaption = image.imagecaption.all()[0]
imagecaption.delete()
return render_modal_workflow(
request,
None,
"wagtailimagecaption/remover/imagecaption_removed.js",
{
'response': json.dumps({
'status': True,
'image_id': image_id,
})
}
)
except ObjectDoesNotExist:
return None
|
[
"jordi.joan@gmail.com"
] |
jordi.joan@gmail.com
|
0ad4ba0d86ec0942cf6592c46f1d38a0d104194e
|
f121b7aeacd41d05d9e0560571a17458f546a69b
|
/pandoc/helper.py
|
ae46c8beeeb77c2a3f34dfdf81d1d86a784b1e68
|
[
"MIT"
] |
permissive
|
G48D/mypandoc
|
7ec67a9df865c7fdf70a894560e731baba062953
|
c66bf2e8148f6af5e6d39d6d3ab0fa91b6385fe9
|
refs/heads/master
| 2020-03-15T16:27:32.818574
| 2018-05-05T06:44:12
| 2018-05-05T06:44:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,006
|
py
|
# -*- coding: utf-8 -*-
"""filters.py:
Find all available filters.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2017-, Dilawar Singh"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import sys
import os
import shutil
import subprocess
import re
script_dir_ = os.path.dirname( os.path.realpath( __file__ ) )
# Third party filters_ I use.
# citeproc must come after crossref.
filters_ = [
'pantable' # pantable before crossref
, 'pandoc-crossref', 'pandoc-citeproc'
, 'pandoc-imagine'
]
def path_of_filters( filters = filters_ ):
paths = [ shutil.which( f ) for f in filters ]
return [ p for p in paths if p is not None ]
def generic_filters( filters = None ):
if not filters:
filters = filters_
flts = path_of_filters ( filters )
flts.append( os.path.join( script_dir_, 'dilawar.py' ) )
return flts
def pandoc_cmd( ):
path = shutil.which( 'pandoc' )
if path is None:
log( "`red Could not find pandoc.`", 'ERROR' )
quit( -1 )
return path
def run( cmd ):
log( "Executing `blue %s`" % cmd )
cmd = cmd.split( )
cmd = [ x for x in cmd if x.strip() ]
pipe = subprocess.Popen(cmd
, stderr=subprocess.PIPE
, stdout=subprocess.PIPE
)
stdout, err = pipe.communicate()
try:
stdout = stdout.decode('utf-8')
err = err.decode('utf-8')
except Exception as e:
pass
if pipe.returncode != 0:
log( 'FAILED\n| RETCODE: %s\n| ERROR: %s' % (pipe.returncode, err))
log( '| OUTPUT: %s' % stdout )
log( '| COMMAND: %s' % ' '.join(cmd) )
def default_tex_template( ):
return os.path.join( script_dir_, 'templates', 'default.latex' )
def log( msg, level = 'INFO' ):
try:
from sty import ef, fg, rs
boldPat = re.compile( r'(\*\*)(?P<text>.+?)(\*\*)', re.DOTALL )
itPat = re.compile( r'(\*)(?P<text>.+?)(\*)', re.DOTALL )
colorPat = re.compile( r'`(?P<color>\w+)\s+(?P<text>.+?)\`', re.DOTALL )
# bold
for m in boldPat.finditer( msg ):
msg = msg.replace( m.group(0), ef.b + m.group('text') + rs.b )
# italics
for m in itPat.finditer( msg ):
msg = msg.replace( m.group(0), ef.i + m.group('text') + rs.i )
# Insert colors.
for m in colorPat.finditer( msg ):
c, t = m.group('color'), m.group( 'text' )
msg = msg.replace( m.group(0), '%s%s' % (getattr(fg,c), t) + fg.rs )
except Exception as e:
pass
try:
print('[%3s] %s' % (level, msg), file=sys.stderr)
except Exception as e:
print('[%3s] %s' % (level, msg.encode('utf-8')), file=sys.stderr)
def test( ):
log( '`blue *Hellow* kitty`. `red how are you __today__`. I am _fine_.' )
if __name__ == '__main__':
test()
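# Hedged sketch (hypothetical file names): assembling a pandoc invocation from these helpers,
# passing every discovered filter as a --filter argument.
#
#   flt_args = ' '.join('--filter ' + f for f in generic_filters())
#   run('%s %s --template=%s input.md -o output.pdf' % (pandoc_cmd(), flt_args, default_tex_template()))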
|
[
"dilawars@ncbs.res.in"
] |
dilawars@ncbs.res.in
|
c89a2e6db796e083762ffce244b80d91e253e993
|
3d843a0d7c5473548d21543a9a1d3a2569dfa5c3
|
/baidu_spider/spider_main.py
|
02323df2cda5b8d70ef1b4ec03a465603d6c1883
|
[] |
no_license
|
current-liu/Spider
|
0d6782818aa9a63e2d1b65c7ebcf2f2030676260
|
ca41661d97791fa3b32bf1ad9693043212fbdbb5
|
refs/heads/master
| 2021-05-15T22:57:22.743253
| 2017-10-27T10:15:40
| 2017-10-27T10:15:40
| 106,762,634
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,473
|
py
|
# coding:utf8
import html_downloader
import html_outputer
import html_parser
import url_manager
class SpiderMain(object):
def __init__(self):
self.urls = url_manager.UrlManager()
self.downloader = html_downloader.HtmlDownloader()
self.parser = html_parser.HtmlParser()
self.outputer = html_outputer.HtmlParser()
def craw(self, root_url):
count = 1
        self.urls.add_new_url(root_url)  # add the root url to the new-url collection
        while self.urls.has_new_url():  # while there are unvisited urls
            try:
                new_url = self.urls.get_new_url()  # take one url
                print 'craw %d : %s' % (count, new_url)
                html_cont = self.downloader.download(new_url)  # download the page at this url
                new_urls, new_data = self.parser.parse(new_url, html_cont)  # extract new urls and data from the page
                self.urls.add_new_urls(new_urls)  # add the extracted urls to the url manager
                self.outputer.collect_data(new_data)  # collect the data for output
if count == 10:
break
count = count + 1
except:
print "craw failed"
self.outputer.output_html()
# print "allUrl:", self.urls.get_all_url()
# for url in self.urls.get_all_url():
# print url
if __name__ == "__main__":
root_url = "http://baike.baidu.com/item/Python"
obj_spider = SpiderMain()
obj_spider.craw(root_url)
|
[
"365493573@qq.com"
] |
365493573@qq.com
|
31b9f0f83c0e0d3d7065477a7124f76186467669
|
3f96a58c20b30a164ae9363f7522758f0d9ed4fe
|
/skyscanner_makeurl.py
|
173dbd481a1c010b4834497bc6f21b27b27479a0
|
[
"MIT"
] |
permissive
|
gen2127/TK_1919
|
f84d16d9a8376e972c9af466dbf9407822ed3097
|
2e0ab50fe6f02d046eed27da5da1a1f45c05174a
|
refs/heads/master
| 2020-08-28T01:01:03.290550
| 2019-11-08T18:27:40
| 2019-11-08T18:27:40
| 217,540,694
| 0
| 0
|
MIT
| 2019-10-25T13:33:12
| 2019-10-25T13:33:12
| null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
import webbrowser
import datetime
params={
"inboundDate": "2019-09-10",
"cabinClass": "business",
"children": 0,
"infants": 0,
"country": "US", #ここから下が必須データ
"currency": "USD",
"locale": "en-US",
"originPlace": "SFO",
"destinationPlace": "LHR",
"outboundDate": str(datetime.date.today()),
"adults": 1
}
url = 'https://www.skyscanner.jp/transport/flights/'+params['originPlace']+'/'+params['destinationPlace']+'/'+params['outboundDate']+'/?adultsv2=1/'
print(url)
webbrowser.open(url,1)
|
[
"ryoheinod@gmail.com"
] |
ryoheinod@gmail.com
|
a8c5343963ce4fe2c601470689de432717b1e410
|
89365087250d7babb3e846b699afbeedb71e697f
|
/Tester.py
|
7e542e39ef12f40867a56c38265f55abfeaedacd
|
[] |
no_license
|
bigbizzy001/hydrateformationcorrelations
|
e5f3908de30dd7dd20a3fcb9df3894f5a0f59881
|
9fa892caf9ab239b72d09249baa8572380d61051
|
refs/heads/master
| 2020-03-26T17:43:40.805626
| 2018-08-18T00:14:25
| 2018-08-18T00:14:25
| 145,176,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 652
|
py
|
from Correlations import HFCorrelations
case = HFCorrelations()
print(case.hammerschmidt(1, 50) , "\n")
print('-'*500)
print(case.bahadori_and_vuthaluru(1, 50, 0.8))
aa = case.bahadori_and_vuthaluru(1, 50, 0.8)
x, y = [], []
for i in range(len(aa)):
x.append(aa[i][0])
y.append(aa[i][1])
print(x)
print(y)
print('-'*500)
print(case.towler_and_mokhatab(1, 50, 0.8))
print('-'*500)
print(case.berge(1, 50, 0.8))
print('-'*500)
print(case.holder_et_al(1, 50))
print('-'*500)
print(case.motiee(1, 50, 0.8))
print('-'*500)
print(case.aut(1, 50, 0.8))
print('-'*500)
print(case.kobayashi_et_al(1, 50, 0.8))
print('-'*500)
print("The End")
|
[
"marcusbourdelon@gmail.com"
] |
marcusbourdelon@gmail.com
|
3559180aa73350be00dff30775a3775f2003d493
|
63f39a64048a1eba18adc8336976a21d13f2e3df
|
/Código-fonte/object-detection/service/object_detection_service/views.py
|
6ed968ec31c77f7c4de9a958b372925ef8dec997
|
[] |
no_license
|
EvoSystems-com-br/ResidenciaSW2018_MonitIntelAmbiente
|
57a6870e67837c6a3909defa0aac0ce6b897d98a
|
7720238af41236c9b9dc1b28d5b193cb8d7f5553
|
refs/heads/master
| 2020-03-26T16:48:40.717847
| 2018-10-30T21:16:04
| 2018-10-30T21:16:04
| 145,124,337
| 0
| 0
| null | 2018-10-30T19:39:02
| 2018-08-17T13:33:48
| null |
UTF-8
|
Python
| false
| false
| 6,939
|
py
|
import base64
from io import BytesIO
import cv2
import os
import sys
import json
import requests
from PIL import Image
from django.http import HttpResponse, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from service import settings
sys.path.append('{}/../../'.format(os.path.dirname(os.path.abspath(__file__))))
sys.path.append('{}/../'.format(os.path.dirname(os.path.abspath(__file__))))
from detector import Detector
from object_detection_service.models import cameras_url, mqtt_client, object_detector_threads
from threads.object_detector import ObjectDetector
from threads.video_streaming import VideoStreaming
def messenger(message):
mqtt_client.publish(topic="object-detection/objects", payload=json.dumps(message))
def on_detection_finish(cam_id, timeout):
del object_detector_threads[cam_id]
mqtt_client.publish(topic="object-detection/remove", payload=cam_id)
if timeout:
mqtt_client.publish(topic="object-detection/logs/success",
payload='Camera {} was unregistered automatically by timeout'.format(cam_id))
@csrf_exempt
def register(request):
already_registered = 'Register error. Camera {} is already registered.'
not_allowed = 'Register error. Camera {} not allowed.'
user_not_responding = 'Register error. Users service is not responding. ' \
'Unable to check camera permission.'
success = 'Camera {} was successfully registered'
if request.method == 'POST':
cam_id = request.POST.get('cam_id')
if object_detector_threads.get(cam_id) is not None:
mqtt_client.publish(topic="object-detection/logs/error",
payload=already_registered.format(cam_id))
return HttpResponse(already_registered.format(cam_id), status=400)
try:
url = cameras_url + '{}/'.format(cam_id)
cam_request = requests.get(url, timeout=4)
response_id = cam_request.json()['id']
if cam_request.status_code != requests.codes.ok or response_id != cam_id:
mqtt_client.publish(topic="object-detection/logs/error",
payload=not_allowed.format(cam_id))
return HttpResponse(not_allowed.format(cam_id), status=403)
except (requests.exceptions.ConnectTimeout, requests.exceptions.ConnectionError):
mqtt_client.publish(topic="object-detection/logs/error",
payload=user_not_responding.format(cam_id))
return HttpResponse(user_not_responding.format(cam_id), status=500)
detector = Detector()
detector.load_model()
vs = VideoStreaming(settings.GPU_SERVER_IP, cam_id)
od = ObjectDetector(vs, detector, messenger, on_detection_finish)
od.start()
object_detector_threads[cam_id] = od
mqtt_client.publish(topic="object-detection/add", payload=cam_id)
mqtt_client.publish(topic="object-detection/logs/success",
payload=success.format(cam_id))
return HttpResponse(od.get_port(), status=200)
else:
return HttpResponse("Method not allowed", status=405)
@csrf_exempt
def monitor(request):
not_found = 'Monitor error. Camera {} not found.'
bad_request = 'Monitor error. Invalid address ({}, {}). Port must be an integer.'
success = 'Monitor success. Sending video to address ({}, {}).'
if request.method == 'POST':
cam_id = request.POST.get('cam_id')
client_ip = request.POST.get('client_ip')
client_port = request.POST.get('client_port')
object_detector = object_detector_threads.get(cam_id)
if object_detector is None:
mqtt_client.publish(topic="object-detection/logs/error",
payload=not_found.format(cam_id))
return HttpResponse(not_found.format(cam_id), status=404)
try:
client_port = int(client_port)
except TypeError:
mqtt_client.publish(topic="object-detection/logs/error",
payload=bad_request.format(client_ip, client_port))
return HttpResponse(bad_request.format(client_ip, client_port), status=400)
object_detector.monitor(client_ip, client_port)
mqtt_client.publish(topic="object-detection/logs/success",
payload=success.format(client_ip, client_port))
return HttpResponse(success.format(client_ip, client_port), status=200)
else:
return HttpResponse("Method not allowed", status=405)
@csrf_exempt
def unregister(request):
not_found = 'Unregister error. Camera {} not found.'
success = 'Camera {} was successfully unregistered'
if request.method == 'POST':
cam_id = request.POST.get('cam_id')
object_detector = object_detector_threads.get(cam_id)
if object_detector is None:
mqtt_client.publish(topic="object-detection/logs/error",
payload=not_found.format(cam_id))
return HttpResponse(not_found.format(cam_id), status=404)
object_detector.kill()
mqtt_client.publish(topic="object-detection/logs/success",
payload=success.format(cam_id))
        return HttpResponse(success.format(cam_id), status=200)
else:
return HttpResponse("Method not allowed", status=405)
@csrf_exempt
def status(request):
if request.method == 'GET':
response = []
for cam_id, od in object_detector_threads.items():
response.append({
'id': od.get_id(),
'video_fps': od.get_video_fps(),
'detection_fps': od.get_detection_fps(),
})
return JsonResponse(response, safe=False)
else:
return HttpResponse("Method not allowed", status=405)
@csrf_exempt
def event_print(request):
not_found = 'Event print error. Camera {} not found.'
success = 'An event print was requested from camera {}'
if request.method == 'GET':
cam_id = request.GET.get('cam_id')
object_detector = object_detector_threads.get(cam_id)
if object_detector is None:
mqtt_client.publish(topic="object-detection/logs/error",
payload=not_found.format(cam_id))
return HttpResponse(not_found.format(cam_id), status=404)
frame = object_detector.get_frame()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
pil_frame = Image.fromarray(frame)
buffered = BytesIO()
pil_frame.save(buffered, format="JPEG")
b64 = base64.b64encode(buffered.getvalue())
mqtt_client.publish(topic="object-detection/logs/success",
payload=success.format(cam_id))
return HttpResponse(b64, status=200)
else:
return HttpResponse("Method not allowed", status=405)
|
[
"jhonata.antunes@outlook.com"
] |
jhonata.antunes@outlook.com
|
34a648025419b546118a509d828ac13e4800eab5
|
12f2584aa766d037731005bc1fd188fd9d78a665
|
/style_transfer.py
|
2278c1c6b733f9e886718dd332075d65d1aca5f6
|
[] |
no_license
|
ddiddi/AProjectADay-12-NeuralStyleTransfer
|
e4ea4899baadb828a61e1d32a13d0f68b0939471
|
dec3f4342456dbc87dbd071d0510645bc44c02f6
|
refs/heads/master
| 2020-05-30T09:32:32.886449
| 2019-05-31T20:07:41
| 2019-05-31T20:07:41
| 189,648,088
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,068
|
py
|
# import the necessary packages
from imutils.video import VideoStream
from imutils import paths
import itertools
import argparse
import imutils
import time
import cv2
# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--models", required=True,
help="path to directory containing neural style transfer models")
args = vars(ap.parse_args())
model_paths = paths.list_files(args["models"], validExts=(".t7",))
model_paths = sorted(list(model_paths))
# Generate unique IDs for each of the model paths, then combine
models = list(zip(range(0, len(model_paths)), (model_paths)))
# Use the cycle function to loop over all models
model_iter = itertools.cycle(models)
(model_ID, model_path) = next(model_iter)
# Load the neural style transfer model
net = cv2.dnn.readNetFromTorch(model_path)
# Initialize the video stream
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
print("[INFO] {}. {}".format(model_ID + 1, model_path))
while True:
frame = vs.read()
# Resize the frame to have a width of 600 pixels
frame = imutils.resize(frame, width=600)
orig = frame.copy()
(h, w) = frame.shape[:2]
# Construct a blob from the frame and forward pass through network
blob = cv2.dnn.blobFromImage(frame, 1.0, (w, h),
(103.939, 116.779, 123.680), swapRB=False, crop=False)
net.setInput(blob)
output = net.forward()
# Reshape the output tensor,
# Add back in the mean subtraction
output = output.reshape((3, output.shape[2], output.shape[3]))
output[0] += 103.939
output[1] += 116.779
output[2] += 123.680
output /= 255.0
# Swap the channel ordering
output = output.transpose(1, 2, 0)
# Show output
cv2.imshow("Input", frame)
cv2.imshow("Output", output)
# Key input
key = cv2.waitKey(1) & 0xFF
if key == ord("n"):
(model_ID, model_path) = next(model_iter)
print("[INFO] {}. {}".format(model_ID + 1, model_path))
net = cv2.dnn.readNetFromTorch(model_path)
elif key == ord("q"):
break
cv2.destroyAllWindows()
vs.stop()
|
[
"dhruv.diddi+1@gmail.com"
] |
dhruv.diddi+1@gmail.com
|
bb0727d12cf81aa36fad96edd91cc3bc7da48065
|
a0d800feaeb2112e0d7c8f31a74e7637522e9a5d
|
/mammal_sims_2/rep_52.py
|
b9e9993328991fbe5bf50b4cb588818e9c0d61a0
|
[] |
no_license
|
sebastianduchene/pacemaker_clustering_methods
|
fc0a914bdbfd3f6f759f549c71b7fd36bed066fc
|
f24233f92835bbba5132d041178eeda119318d56
|
refs/heads/master
| 2016-09-06T11:23:25.180835
| 2015-12-02T20:41:18
| 2015-12-02T20:41:18
| 31,933,201
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 590
|
py
|
import os, sys, re
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn import mixture
import scipy.stats as sp
from scipy.spatial import distance
import scipy.stats as sc
from GMM_trees import *
import itertools
import multiprocessing
from multiprocessing import Pool
execfile('GMM_trees.py')
execfile('functions.py')
# Simulate 100 data sets
mm_dat = pd.DataFrame(rescale_data(np.array(pd.read_csv('mammal_matrix.csv'))))
samples_temp = simulate_data(mm_dat, 2, 'spherical', 1)
f = fit_data(samples_temp[0])
open('out_s1_52.txt', 'w').write(f[0])
|
[
"sebastian.duchene@sydney.edu.au"
] |
sebastian.duchene@sydney.edu.au
|
5833a0b6db548d365a87282c51f074424fb2e9db
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_61/59.py
|
19256ed5af0628a119be46271105a30c91ec3097
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,240
|
py
|
import sys
import operator
def solve(v):
global Current
#print v,Current
if v>Current:
for i in range(v-Current):
Calc()
return sum(Count[v])%100003
def Calc():
global Current
#print 'Calc current' + str(Current)
Current=Current+1
Count.append( [0]*Current )
Count[Current][1]=1
for i in range(1,Current):
for j in range(i):
#print "On ", Current, i,j,Count[Current-1],Count[Current]
Count[Current][i]=Count[Current][i]+ Count[i][j]*NSelectK(Current-i-1, i-j-1)
#print "On ", Current, i,j,Count[Current-1],Count[Current]
def NSelectK(N,k):
if k>N:
return 0
if k>N/2:
k=N-k
if k==0:
return 1
prod=1
for i in range(k):
prod *= N-i
for i in range(1,k+1):
prod /= i
return prod
if __name__== '__main__':
a=[0]*3
a[1]=2
Count=[]
Current=2
Count.append([])
Count.append([0])
Count.append([0,1])
cases= int( sys.stdin.readline())
for i in range(cases):
v=[ int(item) for item in sys.stdin.readline().split() ]
print 'Case #%d: %d'%(i+1,solve(v[0]))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
fe3d8a763da9a37ccbfd6c0ead9ee7c8bcd9d945
|
23f274f95f824d02a21b378706895c8177bcbf59
|
/remenis/core/migrations/0010_auto__chg_field_user_email.py
|
de5cf326bbcc021c25308d695b19c68740599584
|
[] |
no_license
|
nathanwchan/remenis_fork_issue
|
9a0e1d0a5f546629cc2d6199ad9996944dc41707
|
47293af33adff7c8f52e6e10454aed7816aec1a4
|
refs/heads/master
| 2020-06-08T16:13:49.162348
| 2013-07-17T04:20:05
| 2013-07-17T04:20:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,539
|
py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'User.email'
db.alter_column('core_user', 'email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True))
def backwards(self, orm):
# Changing field 'User.email'
db.alter_column('core_user', 'email', self.gf('django.db.models.fields.EmailField')(default='', max_length=75))
models = {
'core.betaemail': {
'Meta': {'object_name': 'BetaEmail'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'core.story': {
'Meta': {'object_name': 'Story'},
'authorid': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_private': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'post_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'story': ('django.db.models.fields.TextField', [], {}),
'story_date_day': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'story_date_month': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'story_date_year': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'core.storycomment': {
'Meta': {'object_name': 'StoryComment'},
'comment': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'storyid': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Story']"})
},
'core.taggeduser': {
'Meta': {'object_name': 'TaggedUser'},
'fbid': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'storyid': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Story']"})
},
'core.user': {
'Meta': {'object_name': 'User'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'fbid': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_registered': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
}
}
complete_apps = ['core']
|
[
"aswath87@gmail.com"
] |
aswath87@gmail.com
|
97130ed871af6f47c43bd80f8edf3000df8bfbd7
|
73c64ca179a21df957e17099f58303df000cf1b1
|
/ChatBot/ChatBot/wsgi.py
|
9127753645375f29f7395528028f0d3b7f16c95d
|
[] |
no_license
|
Atlrrific/SHPEUCSDChatBot
|
9232af0a7dc09749028460c0365b9ae08ae910cc
|
de6da7f0620ee986954a0b61574567f2d7c5dcf2
|
refs/heads/master
| 2021-01-12T12:09:30.695805
| 2016-11-17T04:46:17
| 2016-11-17T04:46:17
| 72,327,341
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 392
|
py
|
"""
WSGI config for ChatBot project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ChatBot.settings")
application = get_wsgi_application()
|
[
"atlsaber@gmail.com"
] |
atlsaber@gmail.com
|
ed4f9c3787b36cc4dcd77cb15647e30d60c6928d
|
75f5132f2b2159bd0d622e1b457d82dd9f33c41d
|
/ghost_post/urls.py
|
0d00520121021fbd83c39f280dab47bedac6c174
|
[] |
no_license
|
ethan375/ghost-post-backend
|
c822f3bf6a358bad0e418d38cb43deab835e5f27
|
d870a6a43854a1937a4050bc90ebb6d5d45fb601
|
refs/heads/master
| 2021-09-24T00:46:45.539929
| 2020-01-14T18:01:46
| 2020-01-14T18:01:46
| 231,163,427
| 0
| 0
| null | 2021-09-22T18:25:11
| 2020-01-01T01:26:22
|
Python
|
UTF-8
|
Python
| false
| false
| 305
|
py
|
from django.contrib import admin
from django.urls import include, path
from rest_framework import routers
from ghosting import views
router = routers.DefaultRouter()
router.register(r'posts', views.PostsViewSet)
urlpatterns = [
path('', include(router.urls)),
path('admin/', admin.site.urls)
]
|
[
"ethanebel1@gmail.com"
] |
ethanebel1@gmail.com
|
b9ebc7fed43bf7a7e20fe82964536e33dcf55029
|
c4597d48b5edefc1ebb0fda3753bb61895c424b5
|
/assessment/migrations/0001_initial.py
|
476d0230b259ec8437198bade6fbddba7cfd356b
|
[
"MIT"
] |
permissive
|
vandorjw/django-assessment
|
dd3786fde018f133ac4dbbea965793ec245bec97
|
f5e8dbeba7084578d09483d403bd3dca49bc3181
|
refs/heads/master
| 2022-05-01T20:03:32.797289
| 2019-12-31T20:24:32
| 2019-12-31T20:24:32
| 18,918,043
| 10
| 3
|
MIT
| 2022-04-22T20:59:13
| 2014-04-18T16:09:23
|
Python
|
UTF-8
|
Python
| false
| false
| 8,729
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-31 05:30
from __future__ import unicode_literals
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import parler.models
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('answer', models.TextField(verbose_name='answer')),
],
options={
'verbose_name_plural': 'answers',
'verbose_name': 'answer',
},
),
migrations.CreateModel(
name='Choice',
fields=[
('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('is_correct', models.BooleanField(default=False, verbose_name='correct')),
],
options={
'verbose_name_plural': 'choices',
'verbose_name': 'choice',
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='ChoiceTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('value', models.CharField(max_length=512, verbose_name='value')),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='assessment.Choice')),
],
options={
'verbose_name': 'choice Translation',
'db_tablespace': '',
'default_permissions': (),
'db_table': 'assessment_choice_translation',
'managed': True,
},
),
migrations.CreateModel(
name='Question',
fields=[
('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('is_required', models.BooleanField(default=False, verbose_name='required')),
('of_type', models.IntegerField(choices=[(1, 'true or false'), (2, 'multiple choice'), (3, 'text')], default=1, verbose_name='type')),
],
options={
'verbose_name_plural': 'questions',
'verbose_name': 'question',
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='QuestionTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('question', models.CharField(max_length=512, verbose_name='question')),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='assessment.Question')),
],
options={
'verbose_name': 'question Translation',
'db_tablespace': '',
'default_permissions': (),
'db_table': 'assessment_question_translation',
'managed': True,
},
),
migrations.CreateModel(
name='Result',
fields=[
('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('timestamp', models.DateTimeField(default=datetime.datetime.now, editable=False)),
],
options={
'verbose_name_plural': 'results',
'verbose_name': 'result',
},
),
migrations.CreateModel(
name='Survey',
fields=[
('_uid', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
('is_active', models.BooleanField(default=True, verbose_name='active')),
('is_private', models.BooleanField(default=False, verbose_name='private')),
('start_date_time', models.DateTimeField(default=datetime.datetime.now, verbose_name='start time')),
('end_date_time', models.DateTimeField(blank=True, null=True, verbose_name='end time')),
('admin', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='assessment_admin_surveys', to=settings.AUTH_USER_MODEL, verbose_name='owner')),
('users', models.ManyToManyField(blank=True, related_name='assessment_user_surveys', to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name_plural': 'surveys',
'verbose_name': 'survey',
},
bases=(parler.models.TranslatableModelMixin, models.Model),
),
migrations.CreateModel(
name='SurveyTranslation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('language_code', models.CharField(db_index=True, max_length=15, verbose_name='Language')),
('name', models.CharField(max_length=160, verbose_name='name')),
('slug', models.SlugField(max_length=160, unique=True, verbose_name='slug')),
('description', models.TextField(verbose_name='description')),
('master', models.ForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='assessment.Survey')),
],
options={
'verbose_name': 'survey Translation',
'db_tablespace': '',
'default_permissions': (),
'db_table': 'assessment_survey_translation',
'managed': True,
},
),
migrations.AddField(
model_name='result',
name='survey',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='results', to='assessment.Survey', verbose_name='survey'),
),
migrations.AddField(
model_name='result',
name='user',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='results', to=settings.AUTH_USER_MODEL, verbose_name='user'),
),
migrations.AddField(
model_name='question',
name='survey',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='assessment.Survey', verbose_name='survey'),
),
migrations.AddField(
model_name='choice',
name='question',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='choices', to='assessment.Question', verbose_name='question'),
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='assessment.Question', verbose_name='question'),
),
migrations.AddField(
model_name='answer',
name='result',
field=models.ForeignKey(editable=False, on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='assessment.Result', verbose_name='result'),
),
migrations.AlterUniqueTogether(
name='surveytranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterUniqueTogether(
name='result',
unique_together=set([('survey', 'user')]),
),
migrations.AlterUniqueTogether(
name='questiontranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterUniqueTogether(
name='choicetranslation',
unique_together=set([('language_code', 'master')]),
),
migrations.AlterUniqueTogether(
name='answer',
unique_together=set([('result', 'question')]),
),
]
|
[
"joostvandorp@gmail.com"
] |
joostvandorp@gmail.com
|
daae3d5f8367b1c58ec8dbc7798f5ba1306f06db
|
a1b689f05eabaa4124f43f8beb9737b4a1e312dc
|
/site-personalization/paid_content/articles/migrations/0004_auto_20190517_1955.py
|
f5afefb25db7b150d1313b39422a7588fd0ca1dd
|
[] |
no_license
|
altovsky/dj-hw
|
7c8f6caedd001820e4e290662052a7c1007cf141
|
4c3d94369e370b301766891e7225dae2c725158c
|
refs/heads/master
| 2022-11-29T17:36:09.072879
| 2019-06-04T20:52:06
| 2019-06-04T20:52:06
| 177,335,756
| 0
| 0
| null | 2022-11-22T03:14:01
| 2019-03-23T20:14:41
|
Python
|
UTF-8
|
Python
| false
| false
| 734
|
py
|
# Generated by Django 2.2 on 2019-05-17 19:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('articles', '0003_auto_20190428_0643'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='user_session',
),
migrations.AddField(
model_name='profile',
name='user_v',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
preserve_default=False,
),
]
|
[
"igor.altovsky@yandex.ru"
] |
igor.altovsky@yandex.ru
|
9deed92c9e96dc463dc24bf36d7fd2fa2422306a
|
e9734181b2fa63958b0cd65a68790ac20f595b7f
|
/quiz_backend/quiz/serializer.py
|
e7317cdee6eba86fb3d3e84684077dc18e19879f
|
[] |
no_license
|
DubeySuvodeep/Quiz
|
ad61eba6fb9a7684c1a8bc60de3d5b5ba52dac30
|
a0f58913019010c6f79df50640a223f7506ec190
|
refs/heads/master
| 2022-11-21T16:11:15.884510
| 2020-07-19T09:58:24
| 2020-07-19T09:58:24
| 280,828,879
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
from rest_framework import serializers
from quiz.models import Question, SubmitAnswer, Choices, Score, Quiz
class ChoicesSerialzer(serializers.ModelSerializer):
class Meta:
fields = '__all__'
model = Choices
class QuestionSerializer(serializers.ModelSerializer):
choices = serializers.SerializerMethodField()
class Meta:
fields = ['quiz', 'description', 'correct_answer', 'choices']
model = Question
def get_choices(self, obj):
print("-------------")
print(obj)
try:
choices = Choices.objects.get(question=obj)
print(choices)
        except Choices.DoesNotExist:
            return None  # without this, `choices` would be unbound below and raise NameError
        return ChoicesSerialzer(choices).data
class SubmitAnswerSerializer(serializers.Serializer):
question_id = serializers.IntegerField()
answer = serializers.CharField()
def create(self, validated_data):
print("----------------------")
print(validated_data)
print("----------------------")
try:
question = Question.objects.get(id=validated_data.get('question_id'))
print(question)
        except Question.DoesNotExist:
            # the original `pass` would leave `question` unbound and crash on the next line
            raise serializers.ValidationError('question_id does not match any Question')
submit_answer = SubmitAnswer.objects.create(question=question, answer=validated_data.get('answer'))
print("Created-----------------")
return submit_answer
class ScoreSerializer(serializers.ModelSerializer):
class Meta:
fields = '__all__'
model = Score
|
[
"suvodeep@talent500.co"
] |
suvodeep@talent500.co
|
2c3d0a1e3f4042d7b84ff97cd1baf99aa045f129
|
e98a7591ab761b3ed47c6e90d5345f3088ad9f54
|
/lesson6.py
|
5c4c6f711d44f68674cb065e82a93d03fc925d65
|
[] |
no_license
|
pingkong/python-learning
|
e75372b70f1fa41c5060e1c31d3fe402ddecf011
|
cb57e649bcc33724b3156bdff5883f55fd38c7a1
|
refs/heads/master
| 2021-01-21T13:53:01.771764
| 2014-07-01T19:29:00
| 2014-07-01T19:29:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 223
|
py
|
testrange = range(1,101)
sumrange=sum(testrange)
sumrangesquared=sumrange*sumrange
print sumrangesquared
squaresum=0
for i in testrange:
squareterm=i*i
squaresum=squaresum+squareterm
print sumrangesquared-squaresum
|
[
"l.austen@corp.badoo.com"
] |
l.austen@corp.badoo.com
|
c54ed1910b39131407d296acc215932089a83644
|
916480ae24345193efa95df013f637e0a115653b
|
/web/transiq/restapi/serializers/api.py
|
9d8ffbdb5e0a3f11316cbb5918f09e901a8c701f
|
[
"Apache-2.0"
] |
permissive
|
manibhushan05/tms
|
50e289c670e1615a067c61a051c498cdc54958df
|
763fafb271ce07d13ac8ce575f2fee653cf39343
|
refs/heads/master
| 2022-12-11T07:59:30.297259
| 2021-09-08T03:24:59
| 2021-09-08T03:24:59
| 210,017,184
| 0
| 0
|
Apache-2.0
| 2022-12-08T02:35:01
| 2019-09-21T16:23:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,295
|
py
|
from rest_framework import serializers
from rest_framework.validators import UniqueValidator
from api.models import S3Upload
class S3UploadSerializer(serializers.Serializer):
id = serializers.IntegerField(label='ID', read_only=True)
bucket = serializers.CharField(max_length=63)
folder = serializers.CharField(max_length=150)
uuid = serializers.CharField(max_length=50, validators=[UniqueValidator(queryset=S3Upload.objects.all())])
filename = serializers.CharField(max_length=150)
uploaded = serializers.BooleanField(default=True)
verified = serializers.BooleanField(required=False)
is_valid = serializers.BooleanField(required=False)
deleted = serializers.BooleanField(required=False)
uploaded_on = serializers.DateTimeField(allow_null=True, required=False)
deleted_on = serializers.DateTimeField(allow_null=True, required=False)
created_on = serializers.DateTimeField(read_only=True)
updated_on = serializers.DateTimeField(read_only=True)
def create(self, validated_data):
instance = S3Upload.objects.create(**validated_data)
return instance
def update(self, instance, validated_data):
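        # QuerySet.update() bypasses save() and signals, so the row is re-fetched below
        # to return an instance with the refreshed field values.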
S3Upload.objects.filter(id=instance.id).update(**validated_data)
return S3Upload.objects.get(id=instance.id)
|
[
"mani@myhost.local"
] |
mani@myhost.local
|
727b6081c68498d38119f92166aa48666c45a0b0
|
b871d542653a673f2555aa8ebcb84fb355e1462b
|
/CodeVita/Cadbury.py
|
636f3775e0ec60dfc6cdd8661c0664a7e78830eb
|
[] |
no_license
|
rajputchintan22/Algorithms
|
90c3005e14e2cbe89c206425fc60b435a651f72a
|
c8da369251d87657cbec5d597788913a63b52faf
|
refs/heads/master
| 2021-08-08T11:11:24.805876
| 2020-06-19T17:52:21
| 2020-06-19T17:52:21
| 193,098,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 515
|
py
|
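# For every i x j bar with p <= i <= q and r <= j <= s, the loops below repeatedly cut off the
# largest possible square (a Euclidean-algorithm style split, counting the quotients a // b)
# and accumulate the total number of squares over all bar sizes into `output`.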
p = int(input())
q = int(input())
r = int(input())
s = int(input())
output = 0
for i in range(p, q+1):
for j in range(r, s+1):
a = i
b = j
remainder = 1
while remainder != 0:
if a >= b:
temp = a // b
output += temp
remainder = a % b
a = remainder
else:
temp = b // a
output += temp
remainder = b % a
b = remainder
print(output)
|
[
"rajputchintan22@gmail.com"
] |
rajputchintan22@gmail.com
|
b5683e7846bde51816adcfb0d836fea3df1175fe
|
ad3a6b79f19310880c9ab72ca214f5c8731723ca
|
/strategies/RSI_Threshold.py
|
c74ec20836fad2141ac55eed0f1a57d25fa67f77
|
[] |
no_license
|
yaalsn/futu_algo
|
b8f88900ea6ba837bf6686eb10393bd32be49675
|
ecfc2c3a967eb8fb07fef428b336b6829d3b795d
|
refs/heads/master
| 2023-02-10T16:01:48.064819
| 2021-01-07T06:22:47
| 2021-01-07T06:22:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,244
|
py
|
# Copyright (c) billpwchan - All Rights Reserved
# Unauthorized copying of this file, via any medium is strictly prohibited
# Proprietary and confidential
# Written by Bill Chan <billpwchan@hotmail.com>, 2021
import pandas as pd
import logger
from strategies.Strategies import Strategies
pd.options.mode.chained_assignment = None # default='warn'
class RSIThreshold(Strategies):
def __init__(self, input_data: dict, rsi_1=6, rsi_2=12, rsi_3=24, lower_rsi=30, upper_rsi=70, observation=100):
"""
Initialize RSI-Threshold Strategy Instance
:param input_data:
:param rsi_1: RSI Period 1 (Default = 6)
:param rsi_2: RSI Period 2 (Default = 12)
:param rsi_3: RSI Period 3 (Default = 24)
:param lower_rsi: Lower RSI Threshold (Default = 30)
:param upper_rsi: Upper RSI Threshold (Default = 70)
:param observation: Observation Period in Dataframe (Default = 100)
"""
self.RSI_1 = rsi_1
self.RSI_2 = rsi_2
self.RSI_3 = rsi_3
self.LOWER_RSI = lower_rsi
self.UPPER_RSI = upper_rsi
self.OBSERVATION = observation
self.default_logger = logger.get_logger("rsi_threshold")
super().__init__(input_data)
self.parse_data()
def __compute_RSI(self, stock_code, time_window):
diff = self.input_data[stock_code]['close'].diff(1).dropna() # diff in one field(one day)
        # this preserves the dimensions of the diff values
up_chg = 0 * diff
down_chg = 0 * diff
# up change is equal to the positive difference, otherwise equal to zero
up_chg[diff > 0] = diff[diff > 0]
# down change is equal to negative difference, otherwise equal to zero
down_chg[diff < 0] = diff[diff < 0]
# check pandas documentation for ewm
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.ewm.html
# values are related to exponential decay
# we set com=time_window-1 so we get decay alpha=1/time_window
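        # Worked example with made-up numbers: if the smoothed average gain is 1.2 and the
        # smoothed average loss is 0.4, then RS = 1.2 / 0.4 = 3 and
        # RSI = 100 - 100 / (1 + 3) = 75, i.e. above the default overbought line of 70.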
up_chg_avg = up_chg.ewm(com=time_window - 1, min_periods=time_window).mean()
down_chg_avg = down_chg.ewm(com=time_window - 1, min_periods=time_window).mean()
rs = abs(up_chg_avg / down_chg_avg)
rsi = 100 - 100 / (1 + rs)
return rsi
def parse_data(self, latest_data: pd.DataFrame = None):
# Received New Data => Parse it Now to input_data
if latest_data is not None:
# Only need to update MACD for the stock_code with new data
stock_list = [latest_data['code'][0]]
# Remove records with duplicate time_key. Always use the latest data to override
time_key = latest_data['time_key'][0]
self.input_data[stock_list[0]].drop(
self.input_data[stock_list[0]][self.input_data[stock_list[0]].time_key == time_key].index,
inplace=True)
# Append empty columns and concat at the bottom
latest_data = pd.concat([latest_data, pd.DataFrame(columns=['rsi_1', 'rsi_2', 'rsi_3'])])
self.input_data[stock_list[0]] = self.input_data[stock_list[0]].append(latest_data)
else:
stock_list = self.input_data.keys()
        # Calculate the RSI columns for every stock in stock_list
for stock_code in stock_list:
# Need to truncate to a maximum length for low-latency
self.input_data[stock_code] = self.input_data[stock_code].iloc[-self.OBSERVATION:]
self.input_data[stock_code][['open', 'close', 'high', 'low']] = self.input_data[stock_code][
['open', 'close', 'high', 'low']].apply(pd.to_numeric)
self.input_data[stock_code]['rsi_1'] = self.__compute_RSI(stock_code=stock_code, time_window=self.RSI_1)
self.input_data[stock_code]['rsi_2'] = self.__compute_RSI(stock_code=stock_code, time_window=self.RSI_2)
self.input_data[stock_code]['rsi_3'] = self.__compute_RSI(stock_code=stock_code, time_window=self.RSI_3)
self.input_data[stock_code].reset_index(drop=True, inplace=True)
def buy(self, stock_code) -> bool:
current_record = self.input_data[stock_code].iloc[-1]
last_record = self.input_data[stock_code].iloc[-2]
        # Buy decision: RSI_1 crosses below the oversold threshold
buy_decision = current_record['rsi_1'] < self.LOWER_RSI < last_record['rsi_1']
if buy_decision:
self.default_logger.info(
f"Buy Decision: {current_record['time_key']} based on \n {pd.concat([last_record.to_frame().transpose(), current_record.to_frame().transpose()], axis=0)}")
return buy_decision
def sell(self, stock_code) -> bool:
current_record = self.input_data[stock_code].iloc[-1]
last_record = self.input_data[stock_code].iloc[-2]
        # Sell decision: RSI_1 crosses above the overbought threshold
sell_decision = current_record['rsi_1'] > self.UPPER_RSI > last_record['rsi_1']
if sell_decision:
self.default_logger.info(
f"Sell Decision: {current_record['time_key']} based on \n {pd.concat([last_record.to_frame().transpose(), current_record.to_frame().transpose()], axis=0)}")
return sell_decision
|
[
"billpwchan@hotmail.com"
] |
billpwchan@hotmail.com
|
c2844b559fdb82914cecdcaf1d8bd3fdb7f750fb
|
e6ee96e6765ace0169e16e5069d7300d0ffeb286
|
/stepper_motor_test.py
|
132ddd642b14655c38693f427c1524336ec00593
|
[] |
no_license
|
thesiti92/douglas
|
b72bce08af60ee6da4a3239fd75c9a77758804bb
|
85871499e8fdf7f1daa4c2a66065a8fa0f013754
|
refs/heads/master
| 2021-01-23T06:24:52.246917
| 2017-10-06T02:29:28
| 2017-10-06T02:29:28
| 86,361,431
| 0
| 1
| null | 2017-05-31T16:06:09
| 2017-03-27T16:57:22
|
Python
|
UTF-8
|
Python
| false
| false
| 407
|
py
|
from Adafruit_MotorHAT import Adafruit_MotorHAT
mh = Adafruit_MotorHAT()
throttle = mh.getStepper(200, 1)
throttle.setSpeed(40)
steps = int(raw_input("How many Steps"))
dir = raw_input("What direction? f/b")
if dir == "f":
throttle.step(steps, Adafruit_MotorHAT.FORWARD, Adafruit_MotorHAT.DOUBLE)
if dir == "b":
throttle.step(steps, Adafruit_MotorHAT.BACKWARD, Adafruit_MotorHAT.DOUBLE)
|
[
"aiansiti@outlook.com"
] |
aiansiti@outlook.com
|
02b900c7a9f1ca012944504563a0edf18ddccc03
|
482dc3424e52668a308d9defb5e06f9f4b1c1b6f
|
/Final Project/Early Research/basline.py
|
7e68d4661f4dc9451b8c11d3a30c4efa99fef2c9
|
[] |
no_license
|
jai-soni/Advanced-Data-Science
|
5fa29f2604e42ad61ec5802527cbebd7f58e87ec
|
d61fa03bbb61960a3db91b69429b90d86799f8cd
|
refs/heads/master
| 2020-05-01T01:26:47.764202
| 2019-02-15T23:37:14
| 2019-02-15T23:37:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,975
|
py
|
import utils
# Classifies a tweet based on the number of positive and negative words in it
TRAIN_PROCESSED_FILE = 'train-processed.csv'
TEST_PROCESSED_FILE = 'test-processed.csv'
POSITIVE_WORDS_FILE = 'positive-words.txt'
NEGATIVE_WORDS_FILE = 'negative-words.txt'
TRAIN = True
def classify(processed_csv, test_file=True, **params):
file = open('positive-words.txt', 'r')
positive_words = [str(line).split("\n")[0] for line in file.readlines()]
file1 = open('negative-words.txt', 'r')
    negative_words = [str(line).split("\n")[0] for line in file1.readlines()]  # read from file1 (negative words), not the already-consumed positive-words handle
print(positive_words)
predictions = []
with open(processed_csv, 'r') as csv:
for line in csv:
if test_file:
tweet_id, tweet = line.strip().split(',')
else:
tweet_id, label, tweet = line.strip().split(',')
pos_count, neg_count = 0, 0
for word in tweet.split():
print (word)
if word in positive_words:
pos_count += 1
elif word in negative_words:
neg_count += 1
#print(pos_count, neg_count)
prediction = 1 if pos_count >= neg_count else 0
if test_file:
predictions.append((tweet_id, prediction))
else:
predictions.append((tweet_id, int(label), prediction))
return predictions
if __name__ == '__main__':
if TRAIN:
predictions = classify(TRAIN_PROCESSED_FILE, test_file=(not TRAIN), positive_words=POSITIVE_WORDS_FILE, negative_words=NEGATIVE_WORDS_FILE)
correct = sum([1 for p in predictions if p[1] == p[2]]) * 100.0 / len(predictions)
print('Correct = %.2f%%' % correct)
else:
predictions = classify(TEST_PROCESSED_FILE, test_file=(not TRAIN), positive_words=POSITIVE_WORDS_FILE, negative_words=NEGATIVE_WORDS_FILE)
utils.save_results_to_csv(predictions, 'baseline.csv')
|
[
"noreply@github.com"
] |
jai-soni.noreply@github.com
|
8df2a6a2a410ba1ac10b19ccedfdf5c21a2aa8f5
|
192d02957f463c404c5efbe5da5be9a78876b479
|
/local/bin/rst2xml.py
|
ed64c43e9305feac5741667610f188cd95ae5eb0
|
[] |
no_license
|
wasuaje/nikola
|
d27aee5103fb192fd4bc9e0c6afd77455bb5c0c6
|
aad251963f40ae94db2e89f754d2743070ed2151
|
refs/heads/main
| 2023-03-29T21:09:06.067991
| 2021-03-26T17:05:25
| 2021-03-26T17:05:25
| 351,855,036
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 639
|
py
|
#!/home/wasuaje/Documentos/desarrollo/nikola/bin/python
# $Id: rst2xml.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing Docutils XML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates Docutils-native XML from standalone '
'reStructuredText sources. ' + default_description)
publish_cmdline(writer_name='xml', description=description)
|
[
"wasuaje@gmail.com"
] |
wasuaje@gmail.com
|
8a563a31f094e637a21adffb9689c6ce5e279250
|
68003d6244ca5f81b8f04de019a0a3ed36904fba
|
/imagenet/opts.py
|
448ed0652e81289e7bbc552d202bdd9a4b3f7e1a
|
[] |
no_license
|
byungjukim/Incremental
|
067132ebddc7746a9ab41e04f1e34ace77c7e7e3
|
5087075babcf05715f18c85be197277c3cdde84d
|
refs/heads/master
| 2021-01-17T22:46:44.545155
| 2017-03-07T13:39:43
| 2017-03-07T13:39:43
| 84,203,025
| 0
| 0
| null | 2017-03-07T13:36:31
| 2017-03-07T13:36:31
| null |
UTF-8
|
Python
| false
| false
| 4,102
|
py
|
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
## dataset parameter
tf.app.flags.DEFINE_integer( 'num_class', 1000, "Number of classes in dataset")
tf.app.flags.DEFINE_integer( 'num_example_train', 1281167, "Number of training data (images per epoch)")
tf.app.flags.DEFINE_integer( 'num_example_test', 10000, "Number of test data")
tf.app.flags.DEFINE_integer( 'num_example_eval', 50000, "Number of test data")
tf.app.flags.DEFINE_integer( 'image_height', 32, "height of original image")
tf.app.flags.DEFINE_integer( 'image_width', 32, "width of original image")
tf.app.flags.DEFINE_integer( 'image_channel', 3, "channel of original image")
## train parameter
tf.app.flags.DEFINE_string( 'test_dir', './test_tfrecord', "Directory for test data")
tf.app.flags.DEFINE_string( 'eval_dir', './anno_full/unsupervised', "Directory of evaluation (unsupervised) data")
tf.app.flags.DEFINE_string( 'eval_save', './anno_full/eval', "Directory to save evaluation outputs")
#tf.app.flags.DEFINE_string( 'train_dir', '/home/user/dataset/ILSVRC2015/Data/CLS-LOC/train', "Directory of training data")
tf.app.flags.DEFINE_string( 'train_dir', './text', "Directory of training data")
tf.app.flags.DEFINE_integer( 'crop_h', 224, "Crop size (height)")
tf.app.flags.DEFINE_integer( 'crop_w', 224, "Crop size (width)")
tf.app.flags.DEFINE_integer( 'crop_ch', 3, "Crop size (channel)")
tf.app.flags.DEFINE_boolean( 'shuffle', True, "Whether to shuffle the batch on training")
tf.app.flags.DEFINE_integer( 'pad_h', 4, "Pad each side of image (height)")
tf.app.flags.DEFINE_integer( 'pad_w', 4, "Pad each side of image (width)")
tf.app.flags.DEFINE_boolean( 'nesterov', True, "Whether to use nesterov on training")
## hyper-parameter
tf.app.flags.DEFINE_integer( 'batch_size', 64, "Number of mini-batch")
tf.app.flags.DEFINE_float( 'momentum', 0.9, "Momentum for SGD")
tf.app.flags.DEFINE_float( 'weight_decay', 0.0002, "Weight decay")
tf.app.flags.DEFINE_float( 'LR', 0.1, "Base learning rate")
tf.app.flags.DEFINE_float( 'LR_decay', 0.1, "Learning rate decaying factor")
tf.app.flags.DEFINE_integer( 'LR_step', 30, "Step width to decay learning rate")
tf.app.flags.DEFINE_integer( 'MAX_EPOCH', 90, "Maximum epoch to train")
tf.app.flags.DEFINE_float( 'eval_threshold', 0.0, "Threshold for evaluation")
## computing parameter
tf.app.flags.DEFINE_integer( 'num_threads', 6, "Number of threads for input processing") #TODO: currently, use single thread for evaluation -> need to modify
tf.app.flags.DEFINE_boolean( 'use_fp16', False, "Whether to use fp16")
tf.app.flags.DEFINE_integer( 'display', 10, "Period to display the current loss")
tf.app.flags.DEFINE_integer( 'save', 10, "Period to save the model (epoch)")
tf.app.flags.DEFINE_string( 'save_path', './checkpoint', "Path to save the model")
tf.app.flags.DEFINE_boolean( 'test_only', False, "Whether to test or train")
tf.app.flags.DEFINE_boolean( 'train', True, "Whether to train or eval")
tf.app.flags.DEFINE_boolean( 'resume', False, "Whether resume or train from scratch")
tf.app.flags.DEFINE_string( 'weights', None, "Specific parameter to restore"
"if None, restore from latest in save path")
tf.app.flags.DEFINE_integer( 'Top_k', 1, "Additional evaluation for top k accuracy")
tf.app.flags.DEFINE_boolean( 'Final_test', False, "Whether to test at the end of training")
tf.app.flags.DEFINE_integer( 'num_gpus', 4, "Number of gpu devices to use")
|
[
"noreply@github.com"
] |
byungjukim.noreply@github.com
|
7c7069d049517f77b6565001183f83bf93bd1b19
|
3e9ef7cda8889f1be76a095becaebbf89f433642
|
/main3.py
|
f37430deee44235ee033eebb9d970c71b02616e4
|
[] |
no_license
|
thandosi/temp-converter
|
1781714fec8d1536c857c9da6730b1df89a3c1d7
|
8722209065daaac6ce0a54c5b4500e0f0f3bd26b
|
refs/heads/main
| 2023-05-04T14:17:59.579843
| 2021-05-24T08:05:18
| 2021-05-24T08:05:18
| 369,546,265
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,482
|
py
|
# importing tkinter
import tkinter
# creating a window
root = tkinter.Tk()
# naming the title of the window
root.title("Temperature Converter")
# setting the size of the window
root.geometry("800x600")
# bg color for the window
root.config(bg="purple")
l1 = tkinter.LabelFrame(root, text="Celsius to Fahrenheit", padx=20, pady=20)
l1.grid(row=2, column=0)
e1 = tkinter.Entry(l1, state="disable")
e1. grid(row=4, column=0)
# setting the state for Celsius
def cel_active():
e2.configure(state='disable')
e1.configure(state="normal")
btn_active = tkinter.Button(root, text="Activate -Celsius to Fahrenheit", command=cel_active)
btn_active.grid(row=6, column=0)
l2 = tkinter.LabelFrame(root, text="Fahrenheit to Celsius", padx=20, pady=20)
l2.grid(row=2, column=5)
e2 = tkinter.Entry(l2, state="disable")
e2.grid(row=4, column=5)
# setting the state for Fahrenheit
def far_active():
e1.configure(state="disable")
e2.configure(state="normal")
btn_active1 =tkinter.Button(root, text="Activate-Fahrenheit to Celsius", command=far_active)
btn_active1.grid(row=6, column=5)
# function for exit/ close the window/ program
def close():
root.destroy()
exit_btn = tkinter.Button(text="Exit Program", command=close)
exit_btn.grid(row=9, column=6)
# function for converting celsius to fahrenheit
def convert_C():
if e1["state"] == "normal" and e1.get() != "":
celsius = float(e1.get())
fahrenheit = (celsius*9/5)+32
result_entry.insert(0, str(fahrenheit))
# function for converting fahrenheit to celsius
def convert_f():
if e2["state"] == "normal" and e2.get() != "":
fahrenheit = float(e2.get())
celsius = (fahrenheit-32)*5/9
result_entry.insert(0, celsius)
result_btn = tkinter.Button(root, text="Convert C to F", command=convert_C)
result_btn.grid(row=7, column=2)
# action button for converting Fahrenheit to Celsius and calling the convert_f()
result_btn2 = tkinter.Button(root, text="Convert F to C", command=convert_f)
result_btn2.grid(row=7, column=4)
# creating the result_entry or output entry
result_entry = tkinter.Entry(root, )
result_entry.grid(row=8, column=2)
# function that will delete the figure in the Entry box/ input box
def clear():
    # Entry.delete needs an end index; delete(0) alone removes only the first character
    e1.delete(0, tkinter.END)
    e2.delete(0, tkinter.END)
    result_entry.delete(0, tkinter.END)
# creating the Clear button and calling the clear()
clear_btn = tkinter.Button(root, text="Clear", command=clear)
clear_btn.grid(row=8, column=6)
root.mainloop()
|
[
"siyanjomeni@gmail.com"
] |
siyanjomeni@gmail.com
|
6032b2592dd176bbdd07abd0ff17d4268d1b7053
|
9ef357b77a503afb11bb8b684321448d13d10f7c
|
/palletize.py
|
8181cfa64a17e675b71cfae6c7bf2caa4a5d105d
|
[] |
no_license
|
timconnorz/GradStreet-Inventory
|
4dc1657155b75d3af48e1352c25fcf990744ebd7
|
977540d7322fecf4077b6d9a558b3380a5c88820
|
refs/heads/master
| 2020-05-05T05:30:39.617400
| 2019-04-05T21:13:48
| 2019-04-05T21:13:48
| 179,755,279
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,666
|
py
|
# Reads in the packing guide and exports the palletizing guide.
# The output must be manually entered into the palletizing guide on Google Sheets.
import csv
IMPORTANTcsv_folder_path = "C:\\Users\\User\\Google Drive\\GradStreet\\InventoryTracker\\IMPORTANT\\csv_files\\"
CREATED_folder_path = "C:\\Users\\User\\Google Drive\\GradStreet\\InventoryTracker\\CREATED\\"
def palletize(packing_guide_path):
    # boxes is a dictionary mapping each box number to its fulfiller (manual or Amazon)
boxes = {}
with open(packing_guide_path, 'r') as csv_file:
fin = csv.reader(csv_file,delimiter=',')
for row in fin:
#print(row)
try:
if int(row[0]) not in boxes:
if len(row[6]) == 10:
boxes[int(row[0])] = "Amazon"
else:
boxes[int(row[0])] = "Tim"
except:
continue
#print(boxes)
with open(CREATED_folder_path + "manual-palletizing-guide.csv",'w',newline='') as csv_file:
fout = csv.writer(csv_file,delimiter=',')
counter = 0
pallet_num = 1
prior_fulfiller = "Amazon" #default value
fout.writerow(["Pallet Number","Box Number","Label"])
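        # A new pallet starts whenever the fulfiller changes or the current pallet already holds 24 boxes.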
for box in boxes:
if boxes[box] != prior_fulfiller:
counter = 0
pallet_num += 1
fout.writerow([pallet_num,box,"Pallet Label " + str(pallet_num)])
counter += 1
if counter == 24:
counter = 0
pallet_num += 1
prior_fulfiller = boxes[box]
palletize(IMPORTANTcsv_folder_path + "MANUAL_PACKING_GUIDE.csv")
|
[
"timconnors@yahoo.com"
] |
timconnors@yahoo.com
|
2c7e9875c5f33b8ac533f4ae62c6e6d26bc828ec
|
0fced8b2e5e14f8b4bbd58a919cfc60f24963c07
|
/fmath.py
|
66c833e2556ad6b2544330a14f76527e92c47e02
|
[] |
no_license
|
Giorc93/Python-Int
|
060ec956bd1ad25ad4c60c1d5c2ce3e0512375be
|
d576681bd0b4324f1a5872849f855eda6b3ca610
|
refs/heads/master
| 2022-08-28T16:35:45.570104
| 2020-05-19T20:07:12
| 2020-05-19T20:07:12
| 265,352,617
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 73
|
py
|
def add(n1, n2):
return n1 + n2
def sust(n1, n2):
return n2-n1
|
[
"example@test.com"
] |
example@test.com
|
4a39c69dcb39a1fc6003762acb6e802d290f0d2f
|
baf824f8819f90928e11480d0eae89efb60341a1
|
/tests/perf_bench/viper_call1c.py
|
c653eb8d3ee18e376ce531e4a8e70994902bd86b
|
[
"MIT"
] |
permissive
|
RockySong/micropython-rocky
|
549770723ba92cb311c468880ead0ffdd4fa8fe5
|
2d728f414bf8d041ca609e00448850759aade3cd
|
refs/heads/omv_initial_integrate
| 2021-05-12T12:20:18.404341
| 2021-01-15T01:15:48
| 2021-01-15T01:15:48
| 117,408,452
| 198
| 90
|
MIT
| 2020-08-25T03:31:32
| 2018-01-14T06:40:36
|
C
|
UTF-8
|
Python
| false
| false
| 351
|
py
|
@micropython.viper
def f1c(x:int) -> int:
return x
@micropython.native
def call(r):
f = f1c
for _ in r:
f(1)
bm_params = {
(50, 10): (15000,),
(100, 10): (30000,),
(1000, 10): (300000,),
(5000, 10): (1500000,),
}
def bm_setup(params):
return lambda: call(range(params[0])), lambda: (params[0] // 1000, None)
|
[
"rock.song@hotmail.com"
] |
rock.song@hotmail.com
|
754c611cc6b2ee1d943e4363594e68673273ec58
|
7125134946e82c15f18ce6408e243b44236d7a40
|
/private_ipynb/SeSu_learning.py
|
6b3862d5b5df13c59949c9f4dc7129135bb24452
|
[] |
no_license
|
isthegoal/XWBank_Top2_solution
|
8654cdd96ddf5c584e62f9162c0178123a41bd41
|
e788bbeeb20d5f00b5e71c8e9f9f10bb5deb0071
|
refs/heads/master
| 2020-05-15T04:33:09.839447
| 2019-04-25T16:48:29
| 2019-04-25T16:48:29
| 182,088,709
| 3
| 0
| null | null | null | null |
GB18030
|
Python
| false
| false
| 20,007
|
py
|
# coding=gbk
import h5py
import numpy as np
from private_ipynb.lgb import lgb_model
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
import lightgbm as lgb
import time
import numpy as np
import h5py
import pandas as pd
from pandas import *
import pickle
'''
Ideas (to try one by one):
   * Use a model plus a probability threshold plus the group factor (in Sesu_learning).
   * Use kNN on the important samples, training on the important features without missing values,
     then feed the unlabeled samples in one by one to see whether this kind of pseudo-labelling works.
   * Keep running the model with extra features added, to screen features by effect (for cross features and rank features).
   * Duplicate the positive samples in the training set and add Gaussian noise to the copies.
   * On validation: what if the group information is folded into the CV split, or the model is trained
     on the full data and submitted directly?
'''
def the_valid_model(X,X_sesu,y,y_sesu,test_data):
print("start:********************************")
start = time.time()
# 进行分离,原特征和半监督特征的分离
N = 5
skf = StratifiedKFold(n_splits=N, shuffle=True, random_state=2018)
auc_cv = []
pred_cv = []
for k, (train_in, test_in) in enumerate(skf.split(X, y)):
X_train, X_test, y_train, y_test = X[train_in], X[test_in], \
y[train_in], y[test_in]
X_train=np.concatenate([X_train, X_sesu], axis=0)
y_train=np.concatenate([y_train, y_sesu], axis=0)
# 数据结构
lgb_train = lgb.Dataset(X_train, y_train)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)
# 设置参数
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': {'auc'},
'max_depth': 4,
'min_child_weight': 6,
'num_leaves': 16,
'learning_rate': 0.02,
'feature_fraction': 0.7,
'bagging_fraction': 0.7,
'bagging_freq': 5,
# 'lambda_l1':0.25,
# 'lambda_l2':0.5,
            # 'scale_pos_weight': 691.0/14309.0,  # must not be set here
# 'num_threads':4,
}
print('................Start training..........................')
# train
gbm = lgb.train(params,
lgb_train,
num_boost_round=2000,
valid_sets=lgb_eval,
early_stopping_rounds=100,
verbose_eval=100)
print('................Start predict .........................')
# 预测
y_pred = gbm.predict(X_test, num_iteration=gbm.best_iteration)
# 评估 roc计算评估得分
tmp_auc = roc_auc_score(y_test, y_pred)
auc_cv.append(tmp_auc)
print("valid auc:", tmp_auc)
# test
pred = gbm.predict(test_data, num_iteration=gbm.best_iteration)
pred_cv.append(pred)
# K交叉验证的平均分数
print('the cv information:')
print(auc_cv)
print('cv mean score', np.mean(auc_cv))
end = time.time()
print("......................run with time: ", (end - start) / 60.0)
print("over:*********************************")
res = np.array(pred_cv)
print("总的结果:", res.shape)
# 最后结果,mean,max,min
r = res.mean(axis=0)
print('result shape:', r.shape)
return np.mean(auc_cv),r
################################## 方案一函数 ##########################################
def gen_sample_use_model_thre():
print('------------------- 1.对每个样本进行初步预测 -----------------------')
with h5py.File('do_pro_data/pro_data.hdf5') as f:
y = f['y'][0:20000]
X = f['X'][0:20000]
test_data = f['test_data'][0:20000]
sesu_pro_data = f['sesu_pro_data'][0:20000]
print(sesu_pro_data)
the_train_score, the_pred_label = lgb_model(X, y, sesu_pro_data)
print(len(the_pred_label))
print('------------------- 2.探寻阈值 -----------------------')
    # scan for a suitable threshold (the loop below starts at 0.118 and increases by 0.001 each pass)
threshold = 0.118
the_feature_X=X
the_label_y=y
file_context = open('record/sesu_record.txt','w')
while(threshold<0.8):
the_feature_X = X
new_pre_label_list = []
for i in the_pred_label:
if i > threshold:
new_pre_label_list.append(1)
else:
new_pre_label_list.append(0)
#the_feature_X=np.concatenate([the_feature_X, sesu_pro_data], axis=0)
#new_pre_label_list=np.concatenate([y, new_pre_label_list], axis=0)
print('合并成的数组the_feature_X的维度为:',the_feature_X.shape)
#print('合并成的数组y的维度为:', new_pre_label_list.shape)
#我们只使用正例样本,所以对于半监督结果进行进一步的提取
the_con_dataset=pd.DataFrame(pd.concat([pd.DataFrame(sesu_pro_data),pd.DataFrame(new_pre_label_list,columns=['label'])],axis=1))
the_con_dataset.to_csv('../feature_data/sesu_mothod1_concat.csv')
#print([i for i in the_con_dataset.columns if i not in ['label']])
the_con_dataset=the_con_dataset[the_con_dataset['label']==1]
print('the con shape:',the_con_dataset.shape)
sesu_pro_data=the_con_dataset[[i for i in the_con_dataset.columns if i not in ['label']]]
new_pre_label_list=the_con_dataset['label'].values
the_record_score, _ = the_valid_model(the_feature_X,sesu_pro_data, y,new_pre_label_list,test_data)
file_context = open('record/sesu_record.txt', 'a+')
file_context.writelines('当前的threshold为:'+str(threshold)+' 得分:'+str(the_record_score)+'\n')
print('写入的信息为:','当前的threshold为:'+str(threshold)+' 得分:'+str(the_record_score))
#np.array(list(the_feature_X).extend(sesu_pro_data))
#np.array(list(y).extend(new_pre_label_list))
        print('*' * 120)  # separator between threshold iterations
file_context.close()
threshold=threshold+0.001
file_context.close()
def create_sample_use_model_thre():
print('------------------- 1.对每个样本进行初步预测 -----------------------')
with h5py.File('do_pro_data/pro_data.hdf5') as f:
y = f['y'][0:20000]
X = f['X'][0:20000]
test_data = f['test_data'][0:20000]
sesu_pro_data = f['sesu_pro_data'][0:20000]
print(sesu_pro_data)
the_train_score, the_pred_label = lgb_model(X, y, sesu_pro_data)
print(len(the_pred_label))
print('------------------- 2.阈值预测 -----------------------')
#遍历查找合适的threshold 从0.02开始,每次增加0.02
threshold = 0.118
the_feature_X=X
the_label_y=y
the_feature_X = X
new_pre_label_list = []
for i in the_pred_label:
if i > threshold:
new_pre_label_list.append(1)
else:
new_pre_label_list.append(0)
#the_feature_X=np.concatenate([the_feature_X, sesu_pro_data], axis=0)
#new_pre_label_list=np.concatenate([y, new_pre_label_list], axis=0)
print('合并成的数组the_feature_X的维度为:',the_feature_X.shape)
#print('合并成的数组y的维度为:', new_pre_label_list.shape)
# 我们只使用正例样本,所以对于半监督结果进行进一步的提取
the_con_dataset = pd.DataFrame(
pd.concat([pd.DataFrame(sesu_pro_data), pd.DataFrame(new_pre_label_list, columns=['label'])], axis=1))
the_con_dataset.to_csv('../feature_data/sesu_mothod1_concat.csv')
# print([i for i in the_con_dataset.columns if i not in ['label']])
the_con_dataset = the_con_dataset[the_con_dataset['label'] == 1]
print('the con shape:', the_con_dataset.shape)
sesu_pro_data = the_con_dataset[[i for i in the_con_dataset.columns if i not in ['label']]]
new_pre_label_list = the_con_dataset['label'].values
#开始做预测
the_record_score, result_file = the_valid_model(the_feature_X,sesu_pro_data, y,new_pre_label_list,test_data)
print('是这个score:',the_record_score)
filepath = '../result/lgb_半监督简单方案1改只正样本_'+ str(the_record_score)+'.csv' # 线下平均分数
# 转为array
print('result shape:',result_file.shape)
sub_sample=pd.read_csv('../result/xgb_nan.csv')
result = DataFrame()
result['cust_id'] = sub_sample['cust_id']
result['pred_prob'] = result_file
result.to_csv(filepath,index=False,sep=",")
################################## 方案二函数 ##########################################
def use_group_number_se():
print('--------------------- 1.执行初步预测 ------------------------')
with h5py.File('do_pro_data/pro_data.hdf5') as f:
y = f['y'][0:20000]
X = f['X'][0:20000]
test_data = f['test_data'][0:20000]
sesu_pro_data = f['sesu_pro_data'][0:20000]
print(sesu_pro_data)
the_train_score, the_pred_label = lgb_model(X, y, sesu_pro_data)
print(len(the_pred_label))
print('--------------------- 2.获取组内正负样本大致数量 ------------------------')
train_x=pd.read_csv("../feature_data/suse_test_data.csv",header=0,sep=",")
print(train_x.columns)
print(train_x['cust_group'].value_counts())
    '''
    Using the class ratios from the original training data:
    group1  ->  4544:456  =  10:1
    group2  ->  4871:129  =  37:1
    group3  ->  4894:106  =  46:1
    Applied to the unlabeled samples:
       group_1    3500     /10 = 350
       group_2    3500     /37 = 95
       group_3    3000     /46 = 65
    '''
print('--------------------- 3.根据取到的数值进行排序选取 ------------------------')
train_x['the_pre_y_prob']=the_pred_label
i=0
# def group_nei(the_new_data):
# print('!!!!!!!!!!!!:',the_new_data)
# the_new_data=pd.DataFrame(the_new_data)
# the_new_data.sort_values('the_pre_y_prob',inplace=True,ascending=False)
# print(the_new_data)
# i=1
# print('i为: ',i)
# return the_new_data[:50]
# b=(train_x.groupby(['cust_group'])).apply(group_nei)
# print(b['group_1'])
    '''
    Group and rank: writing this by hand is tedious, but pandas has a concise way to do it.
    The `method` argument chooses how ties are handled.
    '''
train_x['group_sort'] = train_x['the_pre_y_prob'].groupby(train_x['cust_group']).rank(ascending=0, method='first')
dataframe1=train_x[(train_x['cust_group']=='group_1') & (train_x['group_sort']<=350)]
dataframe1['the_pre_y_prob'] = 1
#print(dataframe1)
dataframe2=train_x[(train_x['cust_group']=='group_2') & (train_x['group_sort']<=95)]
dataframe2['the_pre_y_prob'] = 1
#print(dataframe2)
dataframe3=train_x[(train_x['cust_group']=='group_3') & (train_x['group_sort']<=65)]
dataframe3['the_pre_y_prob'] = 1
#print(dataframe3)
the_big_frame=pd.concat([dataframe1,dataframe2,dataframe3])
print(the_big_frame)
#train_x.to_csv('../feature_data/do_group_sort.csv')
print('--------------------- 4.跑模型得到半监督结果 ------------------------')
column=[i for i in the_big_frame.columns if i not in ['group_sort','the_pre_y_prob','cust_group','Unnamed: 0']]
the_feature_X=the_big_frame[column]
new_pre_label_list=the_big_frame['group_sort']
the_record_score, result_file = the_valid_model(X, the_feature_X, y, new_pre_label_list, test_data)
#效果提交
filepath = '../result/lgb_根据group数量关系方式2_' + str(the_record_score) + '.csv' # 线下平均分数
# 转为array
print('result shape:', result_file.shape)
sub_sample = pd.read_csv('../result/xgb_nan.csv')
result = DataFrame()
result['cust_id'] = sub_sample['cust_id']
result['pred_prob'] = result_file
result.to_csv(filepath, index=False, sep=",")
pass
################################## 方案三函数 ##########################################
def knn_gen_method():
print('------- 简单 前置工作 --------')
from sklearn import datasets,neighbors
with h5py.File('do_pro_data/pro_data.hdf5') as f:
y = f['y'][0:20000]
X = f['X'][0:20000]
test_data = f['test_data'][0:20000]
sesu_pro_data = f['sesu_pro_data'][0:20000]
print(sesu_pro_data)
print('------- 加载数据并做列名处理 --------')
suse_test=pd.read_csv("../feature_data/suse_test_data.csv",header=0,sep=",")
suse_train=pd.read_csv("../feature_data/suse_all_train.csv",header=0,sep=",")
suse_test=suse_test.drop(columns=['Unnamed: 0', 'cust_group'])
suse_train=suse_train.drop(columns=['Unnamed: 0', 'cust_group', 'cust_id'])
the_train_y=suse_train.pop('y')
print(len(suse_test.columns))
print(len(suse_train.columns))
suse_train.rename(
columns={'nan_(35.895, 51.0]':'nan1','nan_(51.0, 66.0]':'nan2','nan_(66.0, 81.0]':'nan3','nan_(81.0, 96.0]':'nan4','nan_(96.0, 111.0]':'nan5','nan_(111.0, 126.0]':'nan6','nan_(126.0, 141.0]':'nan7'},inplace=True)
suse_test.rename(
columns={'nan_(33.893, 49.286]': 'nan1', 'nan_(49.286, 64.571]': 'nan2', 'nan_(64.571, 79.857]': 'nan3',
'nan_(79.857, 95.143]': 'nan4', 'nan_(95.143, 110.429]': 'nan5', 'nan_(110.429, 125.714]': 'nan6',
'nan_(125.714, 141.0]': 'nan7'}, inplace=True)
print('------- 找到无缺失列,并做归一化处理 --------')
test_have_nan_columns=[]
for i in suse_test.columns:
if -99 not in list(suse_test[i]):
test_have_nan_columns.append(i)
print('这列没有缺失:',i)
train_have_nan_columns=[]
for i in suse_train.columns:
if -99 not in list(suse_train[i]):
train_have_nan_columns.append(i)
print('这列没有缺失:',i)
the_jiao_list=list(set(test_have_nan_columns)&set(train_have_nan_columns))
the_new_train=suse_train[the_jiao_list]
the_new_test=suse_test[the_jiao_list]
#归一化处理
suse_test_norm = the_new_test.apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)))
suse_train_norm = the_new_train.apply(lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)))
print('------- 训练模型 --------')
kn_clf=neighbors.KNeighborsClassifier()
kn_clf.fit(suse_train_norm,the_train_y)
the_probility=pd.DataFrame(kn_clf.predict_proba(suse_test_norm))
print('------- 利用预测概率生成无标签样本结果 --------')
print('概率展示为:',the_probility)
the_probility.to_csv('../feature_data/the_probility.csv')
print('列名是:',the_probility.columns)
the_probility.rename(
columns={0:'the_prob_0',1:'the_prob_1'},inplace=True)
suse_test['sample_prob']=the_probility['the_prob_1']
suse_test['sample_prob_rank'] = suse_test['sample_prob'].rank(ascending=0, method='first')
data_frame=suse_test[suse_test['sample_prob_rank']<100]
data_frame['sample_prob']=1
data_frame=data_frame.drop(columns='sample_prob_rank')
data_frame.to_csv('../exp_show/the_test.csv')
print('------- 融入样本,线下cv结果 --------')
new_pre_label_list=data_frame.pop('sample_prob')
the_record_score, result_file = the_valid_model(suse_train.values, data_frame.values, the_train_y, new_pre_label_list, test_data)
#效果提交
filepath = '../result/lgb_使用knn做分类生成300正例方式3_' + str(the_record_score) + '.csv' # 线下平均分数
# 转为array
print('result shape:', result_file.shape)
sub_sample = pd.read_csv('../result/xgb_nan.csv')
result = DataFrame()
result['cust_id'] = sub_sample['cust_id']
result['pred_prob'] = result_file
result.to_csv(filepath, index=False, sep=",")
pass
if __name__=='__main__':
    ######################################## Scheme 1: pure threshold split (no group information) #################################
    '''
       Experiment to find a suitable threshold. For scheme 1 it is better not to add all thresholded
       samples back: the data is already imbalanced and adding everything makes the imbalance much worse,
       so the better strategy is to only add the samples that fall on the positive side of the threshold.
    '''
    #gen_sample_use_model_thre()
    # produce the submission for the chosen threshold
    #create_sample_use_model_thre()
    ######################################## Scheme 2: split by per-group count ratio and probability (uses group information) #################################
    #use_group_number_se()
    ######################################## Scheme 3: kNN self-labelling (no group information; note -99 for missing values can be fed directly to some tree models, roughly meaning "this person is extremely inactive") #################################
    # kNN self-labelling with a cap on the number of pseudo-positives
    knn_gen_method()
    ######################################## Scheme 4: duplicate positive samples and add Gaussian noise #################################
#--------------------------------------------------------------------------------------------------------------------------------------------
'''
  1. Scheme 1:
     * With the simple threshold approach, two thresholds score above 0.823:
         threshold = 0.11800000000000009   score: 0.8235630178730116
         threshold = 0.05100000000000004   score: 0.8231970317080327
       Adding all of the thresholded samples back into training makes things worse (0.74+).
     * Improvement: only add the samples labelled as positive and see how that does.
  2. Scheme 2:
      Splitting by the per-group counts: 0.8203
  3. Scheme 3:
     * The kNN approach is noticeably weaker; predicting with only the non-missing features,
       using 200 pseudo-positives is acceptable:
       generating 300 positives scores 0.8189,
       generating 200 positives scores 0.8202.
'''
|
[
"1091714856@qq.com"
] |
1091714856@qq.com
|
05718f79434ab530895713055604c343fd4de2a5
|
ac8ffabf4d7339c5466e53dafc3f7e87697f08eb
|
/python_solutions/1370.increasing-decreasing-string.py
|
d3dea9772fc98df53aaed2ff588b6be808788a4b
|
[] |
no_license
|
h4hany/leetcode
|
4cbf23ea7c5b5ecfd26aef61bfc109741f881591
|
9e4f6f1a2830bd9aab1bba374c98f0464825d435
|
refs/heads/master
| 2023-01-09T17:39:06.212421
| 2020-11-12T07:26:39
| 2020-11-12T07:26:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,794
|
py
|
from collections import Counter, defaultdict, OrderedDict, deque
from bisect import bisect_left, bisect_right
from functools import reduce, lru_cache
import itertools
import math
import string
true = True
false = False
MIN, MAX = -0x3f3f3f3f, 0x3f3f3f3f
#
# @lc app=leetcode id=1370 lang=python3
#
# [1370] Increasing Decreasing String
#
# https://leetcode.com/problems/increasing-decreasing-string/description/
#
# algorithms
# Easy (78.64%)
# Total Accepted: 4.6K
# Total Submissions: 5.8K
# Testcase Example: '"aaaabbbbcccc"'
#
# Given a string s. You should re-order the string using the following
# algorithm:
#
#
# Pick the smallest character from s and append it to the result.
# Pick the smallest character from s which is greater than the last appended
# character to the result and append it.
# Repeat step 2 until you cannot pick more characters.
# Pick the largest character from s and append it to the result.
# Pick the largest character from s which is smaller than the last appended
# character to the result and append it.
# Repeat step 5 until you cannot pick more characters.
# Repeat the steps from 1 to 6 until you pick all characters from s.
#
#
# In each step, If the smallest or the largest character appears more than once
# you can choose any occurrence and append it to the result.
#
# Return the result string after sorting s with this algorithm.
#
#
# Example 1:
#
#
# Input: s = "aaaabbbbcccc"
# Output: "abccbaabccba"
# Explanation: After steps 1, 2 and 3 of the first iteration, result = "abc"
# After steps 4, 5 and 6 of the first iteration, result = "abccba"
# First iteration is done. Now s = "aabbcc" and we go back to step 1
# After steps 1, 2 and 3 of the second iteration, result = "abccbaabc"
# After steps 4, 5 and 6 of the second iteration, result = "abccbaabccba"
#
#
# Example 2:
#
#
# Input: s = "rat"
# Output: "art"
# Explanation: The word "rat" becomes "art" after re-ordering it with the
# mentioned algorithm.
#
#
# Example 3:
#
#
# Input: s = "leetcode"
# Output: "cdelotee"
#
#
# Example 4:
#
#
# Input: s = "ggggggg"
# Output: "ggggggg"
#
#
# Example 5:
#
#
# Input: s = "spo"
# Output: "ops"
#
#
#
# Constraints:
#
#
# 1 <= s.length <= 500
# s contains only lower-case English letters.
#
#
#
class Solution:
def sortString(self, s: str) -> str:
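        # Counting sort over the 26 lowercase letters: sweep the counts left-to-right, then
        # right-to-left (flipping `pivot` after each pass) until all len(s) characters are appended.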
arr = [0] * 26
for c in s:
arr[ord(c) - 97] += 1
res, pivot = "", 0
cta, ctb = 0, len(s)
while cta < ctb:
for i in range(26):
j = i if pivot == 0 else 25 - i
if arr[j] > 0:
res += chr(j + 97)
arr[j] -= 1
cta += 1
pivot = 1 - pivot
return res
sol = Solution()
s = "aaaabbbbcccc"
print(sol.sortString(s))
|
[
"ssruoz@gmail.com"
] |
ssruoz@gmail.com
|
ceb52672c70129b482b652634097fe45dca5d3e7
|
9ecafbb7df876b377ed8ed2472dbd9ab38bb41d4
|
/pidealfm.py
|
56f0fa31062bff04bcfb70e3fb30243ac134b302
|
[] |
no_license
|
santospat-ti/Python_ExEstruturaSequencial
|
51de3573afd39b3954b5e535559fde636d74f752
|
2c2065b773676990e3f1b8cc11d06057080785d3
|
refs/heads/main
| 2023-02-24T07:17:49.078818
| 2021-01-29T00:21:33
| 2021-01-29T00:21:33
| 333,991,710
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 735
|
py
|
# Given a person's height (h) as input, build an algorithm that computes
# their ideal weight using the following formulas:
# For men: (72.7*h) - 58
# For women: (62.1*h) - 44.7
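# Worked example with assumed inputs: a 1.80 m man gives 72.7*1.80 - 58 = 72.86 kg,
# and a 1.70 m woman gives 62.1*1.70 - 44.7 = 60.87 kg.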
h = float(input('Digite sua altura: '))
peso = float(input('Digite seu peso: '))
sexo = str(input('Digite seu sexo: ')).strip().upper()
while sexo not in ('F', 'M'):
    sexo = str(input('Digite seu sexo: [F/M] ')).strip().upper()
peso_ideal = (72.7*h) - 58 if sexo == 'M' else (62.1*h) - 44.7
if peso < peso_ideal:
print('Abaixo do peso ideal.')
elif peso == peso_ideal:
print('Dentro do peso ideal.')
else:
print('Acima do peso ideal.')
print(f'Peso: {peso:.2f}. / Peso ideal {peso_ideal:.2f}')
|
[
"noreply@github.com"
] |
santospat-ti.noreply@github.com
|
10561316fb2488d5ff5655a5c0acdda48f0d47dc
|
78d3d78ebded691dd6a92f357c7cc75004ff2184
|
/weak_localization/L15_W0p012_highres/plot3.py
|
35695e03b1649b42c24a0db891a9213d1e6e87a0
|
[] |
no_license
|
rafaelha/paper_zbcp
|
0b5bb9500d997ab99cea9959998e3651be75483b
|
db2096eb0cb2d7bb801b4e513320adc9cef7a0d9
|
refs/heads/master
| 2023-02-25T01:41:24.728767
| 2019-09-06T17:24:16
| 2019-09-06T17:24:16
| 199,933,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,932
|
py
|
import matplotlib as mpl
mpl.use('Agg')
import pickle
import numpy as np
import matplotlib.pyplot as plt
import glob
files = glob.glob('*.pickle')
def get_size():
r = open(files[0], 'rb')
pickle.load(r)
pickle.load(r)
pickle.load(r)
pickle.load(r)
pickle.load(r)
pickle.load(r)
pickle.load(r)
ss = pickle.load(r)
r.close()
return len(ss)
l = get_size()
count = 0
duration = 0
en = np.zeros(l)
Reh = np.zeros(l)
Ree = np.zeros(l)
N = np.zeros(l)
G = np.zeros(l)
for f in files:
reader = open(f,'rb')
try:
while True:
Lx = pickle.load(reader)
Ly = pickle.load(reader)
Lz = pickle.load(reader)
delta = pickle.load(reader)
W = pickle.load(reader)
duration += pickle.load(reader)
mu = pickle.load(reader)
en2 = pickle.load(reader)
en += en2
N += pickle.load(reader)
Ree += pickle.load(reader)
Reh += pickle.load(reader)
G2 = pickle.load(reader)
G += G2
Bx = pickle.load(reader)
gap = pickle.load(reader)
g = pickle.load(reader)
mu_local = pickle.load(reader)
B1 = pickle.load(reader)
seed = pickle.load(reader)
count += 1
cond2 = np.logical_and(en2<=delta, en2>=-delta)
en2 = np.block([en2, -en2[cond2]]) * 1e6
ind = np.argsort(en2)
en2 = en2[ind]
G2 = np.block([G2,G2[cond2]])[ind]
plt.figure()
plt.plot(en2, G2)
plt.xlim((-130,130))
plt.xlabel('Bias in $\mu$eV')
plt.ylabel('Conductance <G> ('+str(count)+' realizations)')
plt.title('$L_x=$'+str(Lx)+', $L_y=$'+str(Ly)+', $L_z=$'+str(Lz)\
+', W='+str(round(W*1000))+'meV, $\Delta=$'+str(np.round(delta*1e6,1))+'$\mu$eV, $\mu$='+str(mu*1000)+'meV, $B_1$='+str(B1)+', t='\
+str(np.round(duration))+'s')
plt.savefig('fig/fig_cond_'+str(count)+'.pdf')
plt.close()
except EOFError:
reader.close()
en = en / count
Reh = Reh / count
Ree = Ree / count
N = N / count
G = G / count
duration /= count
cond = np.logical_and(en<=delta, en>=-delta)
en = np.block([en, -en[cond]]) * 1e6
ind = np.argsort(en)
en = en[ind]
G = np.block([G,G[cond]])[ind]
print('Realizations: '+str(count))
print('duration='+str(duration)+'s')
plt.ion()
plt.figure()
plt.plot(en, G)
plt.xlim((-130,130))
plt.xlabel('Bias in $\mu$eV')
plt.ylabel('Conductance <G> ('+str(count)+' realizations)')
plt.title('$L_x=$'+str(Lx)+', $L_y=$'+str(Ly)+', $L_z=$'+str(Lz)\
+', W='+str(round(W*1000))+'meV, $\Delta=$'+str(np.round(delta*1e6,1))+'$\mu$eV, $\mu$='+str(mu*1000)+'meV, $B_1$='+str(B1)+', t='\
+str(np.round(duration))+'s')
plt.savefig('fig_avg_cond_fold.pdf')
|
[
"rafaelhaenel@phas.ubc.ca"
] |
rafaelhaenel@phas.ubc.ca
|
0c729b9ef4e31f3f9af216d3dfd59a80d7573afb
|
2640159bbad2a4b7b16a113d04d98581766f6f02
|
/Experiment-Control/runisl.py
|
e9a3af1db10622a7fedb258cf0c5df9e131bd042
|
[] |
no_license
|
AndroidSim/Virtual-Experiments
|
abe8125a9cfbb606957bd3ab7e2476f1499d8baf
|
56d46b5098c8752d72fc5bd97d585777ab7a69a8
|
refs/heads/master
| 2022-09-14T18:52:44.344545
| 2022-08-12T20:06:59
| 2022-08-12T20:06:59
| 154,492,833
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,110
|
py
|
#!/usr/bin/python
import os
import javabridge
##
## without running "ant jar" to create the dist/isl.jar
##
javaclasspath = [os.path.realpath(os.path.join(os.path.dirname(__file__),'..','build','classes'))] + \
[os.path.realpath(os.path.join(os.path.dirname(__file__),'..','cfg'))] + \
[os.path.realpath(os.path.join(os.path.dirname(__file__),'..','lib', name + '.jar'))
for name in ['hamcrest-core-1.1', 'jmf', 'colt-1.2.0', 'iText-2.1.5', 'junit4-4.8.2',
'commons-math-2.2', 'jcommon-1.0.16', 'logback-classic-0.9.28',
'mason17-with-src', 'ecj19', 'jfreechart-1.0.13', 'logback-core-0.9.28',
'slf4j-api-1.6.1']] + \
javabridge.JARS
##
## using dist/isl.jar
##
#javaclasspath = [os.path.realpath(os.path.join(os.path.dirname(__file__),'..','dist', 'isl.jar'))] + \
# [os.path.realpath(os.path.join(os.path.dirname(__file__),'..','cfg'))] + \
# javabridge.JARS
#print os.pathsep.join(javaclasspath)
javabridge.start_vm(class_path=[os.pathsep.join(javaclasspath)], run_headless=True)
try:
print javabridge.get_env().get_version()
print javabridge.run_script('java.lang.System.getProperty("java.class.path")')
###
## rhino invocation of isl.Main.main(String[] args)
###
#print javabridge.run_script('Packages.isl.Main.main([])');
###
## high invocation of isl.Main.main(String[] args)
###
#javabridge.static_call("isl/Main", "main", "([Ljava/lang/String;)V", [])
###
## low API invocation of isl.Main.main(String[] args)
###
main_class = javabridge.get_env().find_class("isl/Main");
main_method_id = javabridge.get_env().get_static_method_id(main_class,'main','([Ljava/lang/String;)V')
###### ways to construct the String[] argument
## rhino construction of String[]
#main_args = javabridge.run_script("[]") # cheat by creating the array in rhino
## high construction of String[]
#main_args = javabridge.make_list()
### low construction of String[]
string_class = javabridge.get_env().find_class("java/lang/String")
main_args = javabridge.get_env().make_object_array(0,string_class)
### now that we have the String[] arg in a Java Object form we can invoke!
javabridge.get_env().call_static_method(main_class,main_method_id,main_args)
###
## other Examples
###
### Use the high and low level apis to create an Integer and a MyInt
#print('Using api: ')
#mic = javabridge.get_env().find_class("isl/util/MyInt")
#mi = javabridge.make_instance("isl/util/MyInt", "(J)V", 12)
#print javabridge.call(mi,"doubleValue","()D")
#
#i = javabridge.make_instance("java/lang/Integer","(I)V", 12)
#print javabridge.call(i,"toString","()Ljava/lang/String;")
### Use Rhino (JavaScript) to create a MyInt
#print('Using Rhino: ')
#print javabridge.run_script('mi = new Packages.isl.util.MyInt(12);mi.doubleValue();')
finally:
javabridge.kill_vm()
|
[
"drandrewksmith@gmail.com"
] |
drandrewksmith@gmail.com
|
da9970c0ebef6b06081af9749578c6b7c257eb78
|
c3b8b65753bbbefbe6185f6d7084c0eeda04d5f7
|
/main.py
|
f3726a66fef40178306ad649f499b8410e0ba35d
|
[] |
no_license
|
qq2568410997/QuantAnalysis
|
75e2d5ef637c26cf218a6707cd58d9ad68476871
|
1912407cceee0169284e0fc6049e168905d5a54f
|
refs/heads/master
| 2022-12-25T03:42:33.538904
| 2020-09-17T17:01:49
| 2020-09-17T17:01:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
import GlobalData
#Initialize the global data first, before the program logic runs
GlobalData.Init()
#Main program logic
import sys
from PyQt5.QtWidgets import QApplication
from PY_MenuWidget import MenuWidget
if __name__ == '__main__':
    # Test GitHub
App = QApplication(sys.argv)
MainWindow = MenuWidget()
MainWindow.show()
sys.exit(App.exec_())
|
[
"1045255891@qq.com"
] |
1045255891@qq.com
|
a628c09bbb76148913b593b875ff6638a185cf9b
|
f50df187c4ff35f83cb3d2577453fffffde952c8
|
/mtp_api/apps/payment/migrations/0013_billingaddress_debit_card_sender_details.py
|
36aac503d3dd3a3a47fc3ac2568cba06b46c955e
|
[
"MIT",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
ministryofjustice/money-to-prisoners-api
|
a5ecc2f5b3ebe48656712863bfc0b27bd8ffaea9
|
8b7d21e004f8bcb3ea31c764c1bf7ac7d2c4cb0c
|
refs/heads/main
| 2023-08-19T07:10:00.256944
| 2023-08-18T13:21:29
| 2023-08-18T13:21:29
| 37,074,635
| 7
| 1
|
MIT
| 2023-09-05T17:06:01
| 2015-06-08T15:15:05
|
Python
|
UTF-8
|
Python
| false
| false
| 620
|
py
|
# Generated by Django 1.10.7 on 2017-07-13 15:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('security', '0013_auto_20170713_1604'),
('payment', '0012_auto_20170621_1445'),
]
operations = [
migrations.AddField(
model_name='billingaddress',
name='debit_card_sender_details',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='billing_addresses', to='security.DebitCardSenderDetails'),
),
]
|
[
"ian.brechin@gmail.com"
] |
ian.brechin@gmail.com
|
08d316af05641642c96809e3a521de14d4d540d6
|
22d0e5b55a042c0f1dacfc4b200bf8e5e3498677
|
/datasets/junhyung.py
|
8a4990c0e7543a8f4f8b476af53b009c8d1728c6
|
[
"MIT"
] |
permissive
|
goluter/Tacotron2-Wavenet-Korean-TTS
|
b37a2ce15c5b124800507c17519fc2943755d481
|
1a80053b1b328124b77b105cdfa4447445e467d3
|
refs/heads/master
| 2023-05-04T13:27:45.059595
| 2021-05-25T08:55:06
| 2021-05-25T08:55:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,795
|
py
|
# -*- coding: utf-8 -*-
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
import os, json
from utils import audio
from text import text_to_sequence
def build_from_path(hparams, in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
"""
    Preprocesses the speech dataset from a given input path to given output directories
Args:
- hparams: hyper parameters
        - input_dir: input directory that contains the files to preprocess
- out_dir: output directory of npz files
- n_jobs: Optional, number of worker process to parallelize across
- tqdm: Optional, provides a nice progress bar
Returns:
- A list of tuple describing the train examples. this should be written to train.txt
"""
executor = ProcessPoolExecutor(max_workers=num_workers)
futures = []
index = 1
path = os.path.join(in_dir, 'junhyung-recognition-All.json')
with open(path, encoding='utf-8') as f:
content = f.read()
data = json.loads(content)
for key, text in data.items():
wav_path = key.strip().split('/')
wav_path = os.path.join(in_dir, 'audio', '%s' % wav_path[-1])
# In case of test file
if not os.path.exists(wav_path):
continue
futures.append(executor.submit(partial(_process_utterance, out_dir, wav_path, text, hparams)))
index += 1
return [future.result() for future in tqdm(futures) if future.result() is not None]
def _process_utterance(out_dir, wav_path, text, hparams):
"""
    Preprocesses a single utterance wav/text pair.
    This writes the mel-scale spectrogram to disk and returns a tuple to write
    to the train.txt file.
    Args:
        - out_dir: output directory for the preprocessed npz/npy files
        - wav_path: path to the audio file containing the speech input
        - text: text spoken in the input audio file
        - hparams: hyper parameters
    Returns:
        - A tuple: (audio_filename, mel_filename, linear_filename, time_steps, mel_frames, text, npz_filename)
"""
try:
# Load the audio as numpy array
wav = audio.load_wav(wav_path, sr=hparams.sample_rate)
except FileNotFoundError: # catch missing wav exception
print('file {} present in csv metadata is not present in wav folder. skipping!'.format(wav_path))
return None
# rescale wav
if hparams.rescaling: # hparams.rescale = True
wav = wav / np.abs(wav).max() * hparams.rescaling_max
# M-AILABS extra silence specific
if hparams.trim_silence: # hparams.trim_silence = True
wav = audio.trim_silence(wav, hparams) # Trim leading and trailing silence
    # Mu-law quantize; the default value of hparams.input_type is 'raw'
if hparams.input_type == 'mulaw-quantize':
# [0, quantize_channels)
out = audio.mulaw_quantize(wav, hparams.quantize_channels)
# Trim silences
start, end = audio.start_and_end_indices(out, hparams.silence_threshold)
wav = wav[start: end]
out = out[start: end]
        constant_values = audio.mulaw_quantize(0, hparams.quantize_channels)
out_dtype = np.int16
elif hparams.input_type == 'mulaw':
# [-1, 1]
out = audio.mulaw(wav, hparams.quantize_channels)
constant_values = audio.mulaw(0., hparams.quantize_channels)
out_dtype = np.float32
else: # raw
# [-1, 1]
out = wav
constant_values = 0.
out_dtype = np.float32
# Compute the mel scale spectrogram from the wav
mel_spectrogram = audio.melspectrogram(wav, hparams).astype(np.float32)
mel_frames = mel_spectrogram.shape[1]
if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length: # hparams.max_mel_frames = 1000, hparams.clip_mels_length = True
return None
# Compute the linear scale spectrogram from the wav
linear_spectrogram = audio.linearspectrogram(wav, hparams).astype(np.float32)
linear_frames = linear_spectrogram.shape[1]
# sanity check
assert linear_frames == mel_frames
if hparams.use_lws: # hparams.use_lws = False
        # Ensure time resolution adjustment between audio and mel-spectrogram
fft_size = hparams.fft_size if hparams.win_size is None else hparams.win_size
l, r = audio.pad_lr(wav, fft_size, audio.get_hop_size(hparams))
# Zero pad audio signal
out = np.pad(out, (l, r), mode='constant', constant_values=constant_values)
else:
        # Ensure time resolution adjustment between audio and mel-spectrogram
pad = audio.librosa_pad_lr(wav, hparams.fft_size, audio.get_hop_size(hparams))
# Reflect pad audio signal (Just like it's done in Librosa to avoid frame inconsistency)
out = np.pad(out, pad, mode='reflect')
assert len(out) >= mel_frames * audio.get_hop_size(hparams)
    # time resolution adjustment
# ensure length of raw audio is multiple of hop size so that we can use
# transposed convolution to upsample
out = out[:mel_frames * audio.get_hop_size(hparams)]
assert len(out) % audio.get_hop_size(hparams) == 0
time_steps = len(out)
# Write the spectrogram and audio to disk
wav_id = os.path.splitext(os.path.basename(wav_path))[0]
# Write the spectrograms to disk:
audio_filename = '{}-audio.npy'.format(wav_id)
mel_filename = '{}-mel.npy'.format(wav_id)
linear_filename = '{}-linear.npy'.format(wav_id)
npz_filename = '{}.npz'.format(wav_id)
npz_flag = True
if npz_flag:
        # Use the same keys here to stay consistent with the Tacotron code.
data = {
'audio': out.astype(out_dtype),
'mel': mel_spectrogram.T,
'linear': linear_spectrogram.T,
'time_steps': time_steps,
'mel_frames': mel_frames,
'text': text,
            'tokens': text_to_sequence(text),  # the "1" corresponding to eos(~) is appended at the end
'loss_coeff': 1 # For Tacotron
}
np.savez(os.path.join(out_dir, npz_filename), **data, allow_pickle=False)
else:
np.save(os.path.join(out_dir, audio_filename), out.astype(out_dtype), allow_pickle=False)
np.save(os.path.join(out_dir, mel_filename), mel_spectrogram.T, allow_pickle=False)
np.save(os.path.join(out_dir, linear_filename), linear_spectrogram.T, allow_pickle=False)
# Return a tuple describing this training example
return (audio_filename, mel_filename, linear_filename, time_steps, mel_frames, text, npz_filename)
|
[
"donam.kim@nerdspace.co.kr"
] |
donam.kim@nerdspace.co.kr
|
939ca49891418288edff41e11e8f58b62433f2b2
|
5cdbb7a9d42393a48b12911e249e1f282d8fc12d
|
/volume.py
|
c3f4b5038b3876f49b4d466af1974f090e5cbb7a
|
[] |
no_license
|
tmots/Py-Serial-Volume-controller
|
ba47ffdb9cb048fe34993e885024c0c32a9cf729
|
d5947d28338a244b5d3fcbfcc316905cfc36e438
|
refs/heads/master
| 2023-08-20T19:26:36.820713
| 2021-11-02T05:56:22
| 2021-11-02T05:56:22
| 423,715,254
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 908
|
py
|
##serial import
import serial
import serial.tools.list_ports
import time
##volume import
from pycaw.pycaw import AudioUtilities, ISimpleAudioVolume
ports = list(serial.tools.list_ports.comports())
for p in ports:
##print (str(p)[:4])
if str(p)[7:14] == "Arduino" :
conport = str(p)[:4]
###
def vol_fnc(volnum):
numv = float(1/255*float(volnum))
sessions = AudioUtilities.GetAllSessions()
for session in sessions:
volume = session._ctl.QueryInterface(ISimpleAudioVolume)
volume.SetMasterVolume(numv, None)
###
ser = serial.Serial(conport, 9600)
##time.sleep(0.5)
def read_ser():
if ser.readable():
res = ser.readline()
volnum=res.decode()[:len(res)-1]
if int(volnum) < 20 :
volnum = 0
if int(volnum) > 235 :
volnum = 255
vol_fnc(volnum)
##print(volnum)
while True:
read_ser()
|
[
"themots@naver.com"
] |
themots@naver.com
|
4ae869802d356014ba532a755a4394f2dbae4aaa
|
ae6c2a6fa37613ac31b2bd3537b3276c9b333632
|
/licenses/tests/test_admin.py
|
5b04bcc99fc00431a31a5faa8858c3426cb75cac
|
[
"Apache-2.0"
] |
permissive
|
salopensource/sal
|
435a31904eb83048c02c9fbff02bbf832835d1b4
|
0895106c6729d5465da5e21a810e967a73ed6e24
|
refs/heads/main
| 2023-08-03T06:53:40.142752
| 2023-07-28T15:51:08
| 2023-07-28T15:51:08
| 35,883,375
| 227
| 94
|
Apache-2.0
| 2023-07-28T15:51:10
| 2015-05-19T13:21:57
|
Python
|
UTF-8
|
Python
| false
| false
| 254
|
py
|
"""General functional tests for the license admin endpoints."""
from sal.test_utils import AdminTestCase
class LicenseAdminTest(AdminTestCase):
"""Test the admin site is configured to have all expected views."""
admin_endpoints = {'license'}
|
[
"sheagcraig@gmail.com"
] |
sheagcraig@gmail.com
|
1d57d5e575b3352ecc1bd8749ea2ee7314866a62
|
0e07fb803877217bd4182c3e09fb7c4573e24b86
|
/Projeto/Initialization.py
|
ca20207b4759e0da944399c014b24722f8303303
|
[] |
no_license
|
careduardosilva/POO
|
5ca748810b6e3909a4a57cd105caf4c5fa2ea180
|
85d6acf51c9b0457832398d584943644a89988c7
|
refs/heads/master
| 2020-04-07T15:29:29.436768
| 2019-04-02T14:14:33
| 2019-04-02T14:14:33
| 158,487,918
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 575
|
py
|
import pygame
class Initialization():
def __init__(self,width,height):
self.__width = width
self.__height = height
self.__name = "Snake"
self.__isWorking = True
        self.__window_params = pygame.display.set_mode((self.__width, self.__height))
def Start(self):
try:
pygame.init()
except:
print("Falha no módulo")
def Update(self):
while self.__isWorking:
pygame.display.update()
def Options(self):
pygame.display.set_caption("Snake")
def Quit(self):
pygame.quit()
def getWindowParams(self):
return self.__window_params
|
[
"noreply@github.com"
] |
careduardosilva.noreply@github.com
|
45eda63518e9642ea90f6829601589e7488fc123
|
df38f5ed0d7372201280ee2dd1a56d355725e658
|
/datastructs/trie.py
|
d3e8f796f095377db53ab265cefecc577aaa2cc2
|
[] |
no_license
|
ritwikdixit/PythonCode
|
3cf09b0078458eba49b67c4cd730bfbf0bbd1a57
|
4922cd3fedd363813a4979f36d779dde54649048
|
refs/heads/master
| 2021-01-10T18:24:18.889024
| 2018-08-15T03:55:42
| 2018-08-15T03:55:42
| 32,130,723
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 922
|
py
|
from collections import deque
class Node:
def __init__(self):
self.endofWord=False
self.links={}
class Trie:
def __init__(self):
self.root=Node()
#insert a string
def insert(self, s):
node = self.root
for char in s:
if char not in node.links:
node.links[char]=Node()
node=node.links[char]
node.endofWord=True
def contains(self, s):
node = self.root
for char in s:
if char not in node.links:
return False
node=node.links[char]
return node.endofWord
def printWords(self):
def recurse(node, progress):
            if node.endofWord:
                print(progress)
for charkey in node.links:
recurse(node.links[charkey], progress+charkey)
recurse(self.root, '')
#Tests
trie = Trie()
trie.insert('alpha')
trie.insert('alphapotomus')
trie.insert('colloquial')
trie.insert('bamf')
print(trie.contains('bamf'))
print(trie.contains('alph'))
print(trie.contains('sdi'))
trie.printWords()
|
[
"ritwik.dixit@gmail.com"
] |
ritwik.dixit@gmail.com
|
5b20f05b3a08f97b2306479e7ae2ed8e6c8c8235
|
f9fd81d1dca4873db5b9638dc675cadd121c1258
|
/concesionario/aplications/revisiones/apps.py
|
08dbdfe2651a9fa263bb9166aeba815604953435
|
[] |
no_license
|
julian1026/taller2-credito3-c2
|
1a3f25d61a3b9d9c199dbc66cd9116cef1244580
|
06b6e1e6680423f1a9c37ef679c8d63bace90896
|
refs/heads/main
| 2023-01-04T13:20:51.410531
| 2020-10-31T17:46:00
| 2020-10-31T17:46:00
| 305,496,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 95
|
py
|
from django.apps import AppConfig
class RevisionesConfig(AppConfig):
name = 'revisiones'
|
[
"33234916+julian1026@users.noreply.github.com"
] |
33234916+julian1026@users.noreply.github.com
|
341c2374a939293ad385c8220cbb821bc2ac526e
|
9a4f85835786cdc3ec7ecc809f7dbf1121717cf8
|
/proj2.py
|
d38499e90cdd4832fb9e50fecfa22415a2b005f6
|
[] |
no_license
|
hamz1q/Project-6-Connect4Game
|
7c18fcaf715bc1c22733be61a9114ce4c0d6f73d
|
6bdd4e704a43471bf6cab81e4b371ffcc894af18
|
refs/heads/master
| 2023-04-21T14:54:52.006348
| 2021-05-10T11:39:50
| 2021-05-10T11:39:50
| 366,020,997
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,103
|
py
|
"""
File: proj2.py
Author: Hamza Qureshi
Date: 04/23/2020
Section: 15
E-mail: hamz1@umbc.edu
Description:
Connect 4 game, with 2 players or with computer
"""
# tried to split by x and o, but did not work, so can only use grid with no x/o already entered
class AdjoinTheSpheres:
def __init__(self):
self.game_options = []
def main_menu(self, game_type):
self.game_options.append(game_type)
def load_game(self):
game_file = input('What map do you want to load? ')
# opening the file and making it readable
game_file = open(game_file, 'r+')
lines = game_file.readlines()
# tried to split by x and o
# for element in lines:
# if element == 'x' or element == 'o':
# element.split('x')
# element.split('o')
self.turn = lines[1]
# adding all the rows to a list and splitting them
self.final_board = []
self.final_board.append(lines[2].strip('\n').split(' '))
self.final_board.append(lines[3].strip('\n').split(' '))
self.final_board.append(lines[4].strip('\n').split(' '))
self.final_board.append(lines[5].strip('\n').split(' '))
self.final_board.append(lines[6].strip('\n').split(' '))
# converting all spaces in board to periods
for row in range(len(self.final_board)):
# remove last column b/c game board said 7 but had 8
self.final_board[row].pop()
for col in range(len(self.final_board[row])):
if self.final_board[row][col] == '':
self.final_board[row][col] = '.'
print(self.final_board[row][col], end= '')
print()
return self.final_board
def player_vs_player(self):
count = 1
while not self.connect_four():
if count % 2 > 0:
player_input = input('Player x What move do you want to make? Answer as Row (vertical) '
'Column (horizontal) or save game or load game: ')
if player_input == 'load game':
self.load_game()
elif player_input == 'save game':
self.save_game()
else:
# converting str to int if move entered
move_list = (player_input.split())
final_move = []
for i in move_list:
final_move.append(int(i)-1)
move_list = final_move
# makes sure space is not forbidden
if self.final_board[move_list[0]][move_list[1]] == '*':
print('That is a forbidden position try again!')
player_input = input('Player x What move do you want to make? Answer as Row (vertical) '
'Column (horizontal) or save game or load game: ')
# makes sure space isnt already taken
elif self.final_board[move_list[0]][move_list[1]] == 'x' or self.final_board[move_list[0]][move_list[1]] == 'o':
print('Space already occupied! Try again')
player_input = input(
'Player x What move do you want to make? Answer as Row (vertical) '
'Column (horizontal) or save game or load game: ')
else:
count += 1
self.final_board[move_list[0]][move_list[1]] = 'x'
for row in range(len(self.final_board)):
for col in range(len(self.final_board[row])):
print(self.final_board[row][col], end='')
print()
            elif count % 2 == 0:
                player_input = input('Player o What move do you want to make? Answer as Row (vertical) '
                                     'Column (horizontal) or save game or load game: ')
                # converting str to int if move entered
                move_list = (player_input.split())
final_move = []
for i in move_list:
final_move.append(int(i)-1)
move_list = final_move
# makes sure space is not forbidden
if self.final_board[move_list[0]][move_list[1]] == '*':
print('That is a forbidden position try again!')
player_input = input(
'Player o What move do you want to make? Answer as Row (vertical) '
'Column (horizontal) or save game or load game: ')
# makes sure space isnt already taken
elif self.final_board[move_list[0]][move_list[1]] == 'x' or self.final_board[move_list[0]][move_list[1]] == 'o':
print('Space already occupied! Try again')
player_input = input('Player o What move do you want to make? Answer as Row (vertical) '
'Column (horizontal) or save game or load game: ')
else:
count += 1
self.final_board[move_list[0]][move_list[1]] = 'o'
for row in range(len(self.final_board)):
for col in range(len(self.final_board[row])):
print(self.final_board[row][col], end='')
print()
if self.connect_four():
print('You win')
def player_vs_computer(self):
count = 1
while not self.connect_four():
if count % 2 > 0:
player_input = input('Player x What move do you want to make? Answer as Row (vertical) '
'Column (horizontal) or save game or load game: ')
if player_input == 'load game':
self.load_game()
elif player_input == 'save game':
self.save_game()
else:
# converting str to int if move entered
move_list = (player_input.split())
final_move = []
for i in move_list:
final_move.append(int(i) - 1)
move_list = final_move
# makes sure space is not forbidden
if self.final_board[move_list[0]][move_list[1]] == '*':
print('That is a forbidden position try again!')
player_input = input(
'Player x What move do you want to make? Answer as Row (vertical) '
'Column (horizontal) or save game or load game: ')
# makes sure space isnt already taken
elif self.final_board[move_list[0]][move_list[1]] == 'x' or self.final_board[move_list[0]][
move_list[1]] == 'o':
print('Space already occupied! Try again')
player_input = input(
'Player x What move do you want to make? Answer as Row (vertical) '
'Column (horizontal) or save game or load game: ')
else:
count += 1
self.final_board[move_list[0]][move_list[1]] = 'x'
for row in range(len(self.final_board)):
for col in range(len(self.final_board[row])):
print(self.final_board[row][col], end='')
print()
elif count % 2 == 0:
from random import randint
move_list = [randint(0,4), randint(0,6)]
# makes sure space is not forbidden
if self.final_board[move_list[0]][move_list[1]] == '*':
print('That is a forbidden position try again!')
move_list = [randint(0, 4), randint(0, 6)]
# makes sure space isnt already taken
elif self.final_board[move_list[0]][move_list[1]] == 'x' or self.final_board[move_list[0]][move_list[1]] == 'o':
print('That space is already taken')
move_list = [randint(0, 4), randint(0, 6)]
else:
count += 1
self.final_board[move_list[0]][move_list[1]] = 'o'
print('Computer turn completed')
for row in range(len(self.final_board)):
for col in range(len(self.final_board[row])):
print(self.final_board[row][col], end='')
print()
if self.connect_four():
print('You win')
def save_game(self):
pass
    def connect_four(self):
        # check for connect 4 horizontally and vertically
        board = self.final_board
        for row in range(len(board)):
            for col in range(len(board[row])):
                piece = board[row][col]
                if piece not in ('x', 'o'):
                    continue
                across = col + 3 < len(board[row]) and all(board[row][col + k] == piece for k in range(4))
                down = row + 3 < len(board) and all(board[row + k][col] == piece for k in range(4))
                if across or down:
                    self.winner = "You win"
                    return self.winner
def play_game(self):
# adding game options to menu
print('AdjoinTheSpheres Main Menu')
print(self.game_options[0])
print(self.game_options[1])
print(self.game_options[2])
# input validation for menu
options = int(input('Select option from the menu: '))
while options != 1 and options != 2 and options != 3:
print('Please select a valid option from the menu')
options = int(input('Select an option from the menu: '))
# calls load_game function
if options == 1 or options == 2:
# self.game_file = input('What map do you want to load? ')
self.load_game()
if options == 1:
self.player_vs_player()
elif options == 2:
self.player_vs_computer()
if __name__ == "__main__":
game_1 = AdjoinTheSpheres()
game_1.main_menu( '1.) New game (2 Players)')
game_1.main_menu( '2.) New Game (Player vs Computer)')
game_1.main_menu( '3.) Exit Game')
game_1.play_game()
|
[
"hamz1@linux5.gl.umbc.edu"
] |
hamz1@linux5.gl.umbc.edu
|
910c33d7dbbc42eb2bff828efaca2c423e7434d0
|
4079cb23c582838573ccab96e91ddfac55a73ef6
|
/solution.py
|
78f3678f9f24cbb8d91d435d417078c2f8df7530
|
[
"Apache-2.0"
] |
permissive
|
gdmanandamohon/paytmlab-test
|
90c878e76a4d0e0885bd1caec763339dd32aa5b7
|
f304f30d83d6a0626f905dc35194cd92c9c7e742
|
refs/heads/main
| 2023-01-22T09:13:27.087790
| 2020-11-30T05:59:10
| 2020-11-30T05:59:10
| 317,124,758
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,363
|
py
|
import pandas as pd
import numpy as np
def read_df(url):
return pd.read_csv(url)
#load df
df_dir = 'data/2019/part-0000{0}-890686c0-c142-4c69-a744-dfdc9eca7df4-c000.csv'
dfs = []
for x in range(5):
td = read_df(df_dir.format(x))
dfs.append(td)
wt_df = pd.concat(dfs)
'''Now I have the data frames together and filter out the missing values'''
wt_df = wt_df[ (wt_df['TEMP']!=9999.9) | (wt_df['DEWP']!= 9999.9) | (wt_df['SLP']!=9999.9) | (wt_df['STP']!= 9999.9)]
wt_df = wt_df[ (wt_df['VISIB']!=999.9) | (wt_df['WDSP']!= 999.9) | (wt_df['MXSPD']!=999.9) | (wt_df['GUST']!= 999.9)]
wt_df = wt_df[ (wt_df['MAX']!=9999.9) | (wt_df['MIN']!= 9999.9) | (wt_df['PRCP']!=99.9) | (wt_df['SNDP']!= 999.9)]
#load station and countrylist
st_dir= 'stationlist.csv'
cnt_dir= 'countrylist.csv'
cnt_df = read_df(cnt_dir)
st_df = read_df(st_dir)
'''Here I join 3 different dataframes to merge the data into one data frame; the new dataframe will have additional columns such as the country name.'''
'''On a Hadoop platform this would be much easier to do; since I don't have that platform available, I am moving forward with a pandas df.'''
finall_df = wt_df.join(st_df.set_index('STN_NO'), on='STN--- ').join(cnt_df.set_index('COUNTRY_ABBR'), on='COUNTRY_ABBR')
#Here the table 'finall_df' has some values marked with an asterisk '*' in the MIN column; I didn't get any direction on how to handle that. My assumption is that we should just leave the marker out and keep the real value in those cells.
''' Let's consider finall_df as table and write SQL for those three question'''
''' ******** Step 2 - Questions ************* '''
# 1. Which country had the hottest average mean temperature over the year?
# (kept as a string so the file remains valid Python)
q1 = """
SELECT COUNTRY_FULL, max(msx) FROM
(
    SELECT COUNTRY_FULL, max(avg_TEMP) AS msx FROM (SELECT COUNTRY_FULL, avg(TEMP) AS avg_TEMP
                                                    FROM finall_df
                                                    GROUP BY COUNTRY_FULL)
)
"""
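# A minimal pandas sketch of the same question (illustrative only; it assumes the
# joined finall_df above has the columns COUNTRY_FULL and TEMP):
hottest_country = finall_df.groupby('COUNTRY_FULL')['TEMP'].mean().sort_values(ascending=False).head(1)
print(hottest_country)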
#2. Which country had the most consecutive days of tornadoes/funnel cloud formations?
'''Didn't get a clear idea of what the tornado/funnel-cloud determination should be based on'''
#3. Which country had the second highest average mean wind speed over the year?
# (kept as a string so the file remains valid Python)
q3 = """
SELECT COUNTRY_FULL, max(msx) AS second_max_speed
FROM (SELECT COUNTRY_FULL, max(avg_MXSPD) AS msx FROM (SELECT COUNTRY_FULL, avg(MXSPD) AS avg_MXSPD
                                                       FROM finall_df
                                                       GROUP BY COUNTRY_FULL)
      WHERE msx < max(msx))
"""
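# Equivalent hedged pandas sketch (illustrative only; it mirrors the SQL above and
# assumes finall_df has the columns COUNTRY_FULL and MXSPD):
second_windiest = finall_df.groupby('COUNTRY_FULL')['MXSPD'].mean().nlargest(2).tail(1)
print(second_windiest)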
|
[
"gdm.anandamohon@gmail.com"
] |
gdm.anandamohon@gmail.com
|
dffcbf3c4a94c007967039a87da9f55a16969b52
|
456557cf9de8ee3c184b1bf5fceafbff7e52d990
|
/app.py
|
c24b90a5a6be34f79e48d0887e2991a7d513b2dc
|
[] |
no_license
|
Konovalov-Kos/test_BWT
|
b9297a098c036f432afbf44f85593f1f254a57b4
|
feeec1c1aac991bed0635065eb12a4048f8a5a1a
|
refs/heads/master
| 2021-07-11T20:39:30.667547
| 2020-02-15T00:39:45
| 2020-02-15T00:39:45
| 240,622,806
| 0
| 0
| null | 2021-03-20T02:54:15
| 2020-02-15T00:38:56
|
Python
|
UTF-8
|
Python
| false
| false
| 305
|
py
|
from flask import Flask, render_template
from gevent.pywsgi import WSGIServer
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def home():
return 'home'
if __name__ == '__main__':
http_server = WSGIServer(('', 5000), app)
http_server.serve_forever()
# app.run(debug=True)
|
[
"kosm888@mail.ru"
] |
kosm888@mail.ru
|
5aa3786f1bbd31722bd0f633d06b60a5c4225cfe
|
ed8af552d5c70478eb9e9a5aaa371ab9b96217cb
|
/VariantDatabase/views.py
|
1eb600b6fa1a3b8f171087788961367202e00ae9
|
[] |
no_license
|
josephhalstead/VariantDatabase
|
1cbadc90f7aaf0f84715880fe16f67c8a0efb7ce
|
191fb09d13aded90576bf81966c201b160425549
|
refs/heads/master
| 2021-01-21T09:43:06.259926
| 2017-10-26T11:20:00
| 2017-10-26T11:20:00
| 82,926,143
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 16,405
|
py
|
from django.shortcuts import render, get_object_or_404, redirect
from VariantDatabase.models import *
from django.contrib.auth.decorators import login_required
from .forms import *
from django.utils import timezone
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
import parsers.vcf_parser as vcf_parser
import parsers.file_parsers as parsers
from django.forms import modelformset_factory
import collections
from django.template.loader import render_to_string
from django.http import HttpResponse, Http404, JsonResponse
import re
import base64
from django.core.files.base import ContentFile
from django.core.files import File
import VariantDatabase.utils.variant_utilities as variant_utilities
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from VariantDatabase.serializers import VariantFreqSerializer
import myvariant
import urllib2
@login_required
def home_page(request):
"""
The homepage
"""
return render(request, 'VariantDatabase/home_page.html', {})
@login_required
def list_sections(request):
"""
This view allows the user to view a section page
    On this page the relevant worksheets will be shown.
TODO: sort worksheets by status
"""
all_sections = Section.objects.all()
return render(request, 'VariantDatabase/list_sections.html', {'all_sections': all_sections} )
@login_required
def list_worksheet_samples(request, pk_worksheet):
"""
This view lists the samples in a particular worksheet
"""
worksheet = get_object_or_404(Worksheet, pk=pk_worksheet)
if request.method == 'POST':
#if user is authorised
        worksheet = get_object_or_404(Worksheet, pk=pk_worksheet)
worksheet.status = '3'
worksheet.save()
return redirect(list_worksheet_samples, pk_worksheet)
else:
form = WorksheetStatusUpdateForm()
quality_data = worksheet.get_quality_data()
samples_in_worksheet = Sample.objects.filter(worksheet = worksheet, visible=True)
return render(request, 'VariantDatabase/list_worksheet_samples.html', {'samples_in_worksheet': samples_in_worksheet, 'form': form, 'worksheet': worksheet, 'quality_data': quality_data})
@login_required
def sample_summary(request, pk_sample):
"""
This view displays the various information about a sample
"""
sample = get_object_or_404(Sample, pk=pk_sample)
total_summary = sample.total_variant_summary()
reports = Report.objects.filter(sample=sample)
if request.method == "POST": #if the user clicked create a new report
if 'reportform' in request.POST:
report_form = ReportForm(request.POST)
if report_form.is_valid():
report = report_form.save(commit=False)
report.sample = sample
report.status ='1'
report.save()
report.initialise_report()
return redirect(create_sample_report, sample.pk, report.pk)
elif 'filterform' in request.GET: #if the user clicked filter
filter_form = FilterForm(request.GET)
report_form = ReportForm()
consequences_to_include =[]
for key in request.GET:
if key != 'csrfmiddlewaretoken' and key != 'filterform' and 'freq' not in key:
if key == 'five_prime_UTR_variant': #can't start python variables with a number so have to change key from 5_prime_UTR_variant to five_prime_UTR_variant
consequences_to_include.append('5_prime_UTR_variant')
elif key == 'three_prime_UTR_variant':
consequences_to_include.append('3_prime_UTR_variant')
else:
consequences_to_include.append(key)
max_af = request.GET.get('freq_max_af')
consequences_query_set = Consequence.objects.filter(name__in = consequences_to_include)
variant_samples =VariantSample.objects.filter(sample=sample, variant__worst_consequence__in=consequences_query_set).filter(variant__max_af__lte=max_af).order_by('variant__worst_consequence__impact', 'variant__max_af') #performance?
variants = Variant.objects.filter(variant_hash__in= variant_samples.values_list('variant_id', flat=True))
summary = sample.variant_query_set_summary(variants)
gene_coverage = GeneCoverage.objects.filter(sample=sample)
exon_coverage = ExonCoverage.objects.filter(sample=sample)
user_settings = UserSetting.objects.filter(user=request.user)
return render(request, 'VariantDatabase/sample_summary.html', {'sample': sample, 'variants': variant_samples, 'report_form': report_form, 'reports': reports, 'summary': summary, 'total_summary': total_summary,
'filter_form': filter_form, 'gene_coverage': gene_coverage,'exon_coverage': exon_coverage , 'user_settings': user_settings })
else:
filter_dict = sample.worksheet.sub_section.create_filter_dict()
report_form = ReportForm()
filter_form = FilterForm(initial=filter_dict)
consequences_to_include =[]
for key in filter_dict:
if 'freq' not in key and filter_dict[key] ==True:
if key == 'five_prime_UTR_variant':
consequences_to_include.append('5_prime_UTR_variant')
elif key == 'three_prime_UTR_variant':
consequences_to_include.append('3_prime_UTR_variant')
else:
consequences_to_include.append(key)
consequences_query_set = Consequence.objects.filter(name__in = consequences_to_include)
variant_samples =VariantSample.objects.filter(sample=sample, variant__worst_consequence__in=consequences_query_set).filter(variant__max_af__lte=filter_dict['freq_max_af']).order_by('variant__worst_consequence__impact', 'variant__max_af')
variants = Variant.objects.filter(variant_hash__in= variant_samples.values_list('variant_id', flat=True))
summary = sample.variant_query_set_summary(variants)
gene_coverage = GeneCoverage.objects.filter(sample=sample)
exon_coverage = ExonCoverage.objects.filter(sample=sample)
user_settings = UserSetting.objects.filter(user=request.user)
return render(request, 'VariantDatabase/sample_summary.html', {'sample': sample, 'variants': variant_samples, 'report_form': report_form, 'reports': reports, 'summary': summary, 'total_summary': total_summary,
'filter_form': filter_form, 'filter_dict': filter_dict, 'cons': consequences_to_include, 'gene_coverage': gene_coverage,'exon_coverage': exon_coverage, 'user_settings': user_settings})
@login_required
def variant_detail(request, pk_sample, variant_hash):
"""
    This view displays the detail for a particular variant.
    It combines - sample specific annotation data e.g. pulled from the vcf
- Global variant data e.g. chr, pos, ref that are associated with all variants of the type
- Allows classification
"""
sample = get_object_or_404(Sample, pk=pk_sample)
variant = get_object_or_404(Variant, variant_hash=variant_hash)
other_alleles = variant.get_other_alleles()
transcripts = VariantTranscript.objects.filter(variant = variant)
return render(request, 'VariantDatabase/variant_detail.html', {'variant': variant, 'transcripts': transcripts, 'other_alleles': other_alleles})
@login_required
def view_gene(request, gene_pk):
"""
A view to allow the user to view all the Variants in a Gene.
"""
gene_pk = gene_pk.upper()
gene = Gene.objects.get(name=gene_pk)
variants = gene.get_all_variants()
form = SearchFilterForm()
return render(request,'VariantDatabase/gene.html', {'variants': variants, 'gene': gene, 'form': form})
@login_required
def view_detached_variant(request, variant_hash):
"""
View a variant independent of any sample it is associated with.
"""
variant = get_object_or_404(Variant, variant_hash=variant_hash)
other_alleles = variant.get_other_alleles()
transcripts = VariantTranscript.objects.filter(variant = variant)
return render(request, 'VariantDatabase/variant_view.html', {'variant': variant, 'transcripts': transcripts, 'other_alleles': other_alleles} )
@login_required
def create_sample_report(request, pk_sample, pk_report):
"""
Allow the user to create a new report and select their responses.
    Uses the Model Formset functionality of Django to accomplish this.
"""
report = get_object_or_404(Report, pk=pk_report)
ReportVariantFormset = modelformset_factory(ReportVariant, fields=('status','variant'), extra=0, widgets={"variant": forms.HiddenInput()})
if request.method == 'POST': # if the user clicks submit
#create a formset factory - using the ReportVariant model. Hide the variant field.
formset = ReportVariantFormset(request.POST)
if formset.is_valid():
instances = formset.save()
report.status = '2'
report.save()
return redirect(view_sample_report, pk_sample, pk_report)
report_variant_formset = ReportVariantFormset(queryset=ReportVariant.objects.filter(report=report)) # populate formset
variants = ReportVariant.objects.filter(report=report) #get the variants from the ReportVariant model.
#Create an ordered dict. Use this to store Variants and forms together using Variant hash as key
#For example: dict = {variant_hash:[Variant, Form]}
#This allows us to put variants and selector drop downs from form next to each other in a HTML table.
my_dict =collections.OrderedDict()
for variant in variants:
my_dict[variant.variant.variant_hash] = [variant]
for form in report_variant_formset:
key = form.__dict__['initial']['variant']
my_dict[key].append(form)
return render(request, 'VariantDatabase/create_sample_report.html', {'formset': report_variant_formset, 'dict': my_dict} )
@login_required
def view_sample_report(request, pk_sample, pk_report):
"""
View a sample report i.e. the output of the create_sample_report view.
"""
report = get_object_or_404(Report, pk=pk_report)
report_variants = ReportVariant.objects.filter(report=report)
return render(request, 'VariantDatabase/view_sample_report.html' , {'report': report, 'report_variants': report_variants})
@login_required
def ajax_detail(request):
"""
    Ajax View - create the top div of the summary page e.g. detail, IGV, evidence when a user clicks the row.
"""
if request.is_ajax():
variant_hash = request.GET.get('variant_hash')
sample_pk = request.GET.get('sample_pk')
variant_hash = variant_hash.strip()
sample_pk = sample_pk.strip()
variant= Variant.objects.get(variant_hash=str(variant_hash))
sample = Sample.objects.get(pk=sample_pk)
variant_sample = VariantSample.objects.get(variant=variant, sample=sample)
comments =Comment.objects.filter(variant_sample=variant_sample)
perms = request.user.has_perm('VariantDatabase.add_comment')
html = render_to_string('VariantDatabase/ajax_detail.html', {'variant': variant, 'sample': sample, 'comments': comments, 'perms': perms})
return HttpResponse(html)
else:
raise Http404
@login_required
def ajax_comments(request):
"""
Ajax View - when the user clicks the upload comment/file button this updates the comment section of the page.
Clipboard paste only works on HTML5 enabled browser
"""
if request.is_ajax():
variant_hash = request.POST.get('variant_hash')
sample_pk = request.POST.get('sample_pk')
comment_text = request.POST.get('comment_text')
variant_hash = variant_hash.strip()
sample_pk = sample_pk.strip()
comment_text = comment_text.strip()
variant= Variant.objects.get(variant_hash=str(variant_hash))
sample = Sample.objects.get(pk=sample_pk)
variant_sample = VariantSample.objects.get(variant=variant, sample=sample)
if len(comment_text) >1: #Check user has entered a comment
new_comment = Comment(user=request.user, text=comment_text, time=timezone.now(),variant_sample=variant_sample )
new_comment.save()
if request.FILES.get('file', False) != False: #Deal with files selected using the file selector html widget
file = request.FILES.get('file')
new_evidence = Evidence()
new_evidence.file = file
new_evidence.comment= new_comment
new_evidence.save()
if request.POST.get('image_data') !=None: #deal with images pasted in from the clipboard
image_data = request.POST.get('image_data')
image_data = image_data.strip() #strip of any leading characters
dataUrlPattern = re.compile('data:image/(png|jpeg);base64,(.*)$') #add appropiate header
ImageData = dataUrlPattern.match(image_data).group(2)
ImageData = base64.b64decode(ImageData) #to binary
new_evidence = Evidence()
new_evidence.comment= new_comment
new_evidence.file.save(str(sample.pk)+"_"+str(new_comment.pk)+"_clip_image.png", ContentFile(ImageData)) #save image
new_evidence.save()
comments =Comment.objects.filter(variant_sample=variant_sample)
html = render_to_string('VariantDatabase/ajax_comments.html', {'comments': comments, 'variant': variant, 'sample': sample})
return HttpResponse(html)
else:
raise Http404
@login_required
def ajax_table_expand(request):
"""
An AJAX view for the child rows in the Summary page view.
It returns the HTML data that goes in the child row.
"""
if request.is_ajax():
variant_hash = request.GET.get('variant_hash')
variant_hash = variant_hash.strip()
variant= Variant.objects.get(variant_hash=str(variant_hash))
variant_transcripts = VariantTranscript.objects.filter(variant=variant)
html = render_to_string('VariantDatabase/ajax_table_expand.html', {'variant_transcripts': variant_transcripts})
return HttpResponse(html)
else:
raise Http404
@login_required
def user_settings(request):
"""
View with a form for changing user settings.
"""
user_settings = UserSetting.objects.filter(user=request.user)
if request.method == 'POST':
user_settings = user_settings[0]
form = UserSettingsForm(request.POST, instance=user_settings)
if form.is_valid():
user_settings = form.save()
return redirect('home_page')
if user_settings.exists():
form = UserSettingsForm(instance=user_settings[0])
else:
user_settings = UserSetting(user=request.user)
user_settings.save()
form = UserSettingsForm(instance=user_settings)
return render(request, 'VariantDatabase/user_settings.html' , {'form': form})
@login_required
def search(request):
"""
Main search page for the database.
Currently allows :
1) searching by variant e.g. 2-4634636-A-T
2) searching by gene
"""
form = SearchForm()
if request.GET.get('search') != "" and request.GET.get('search') != None: #if we have typed in the main search
search_query = request.GET.get('search').upper()
variant_search = re.compile("^([1-9]{1,2}|[XYxy])-\d{1,10}-[ATGCatgc]+-[ATGCatgc]+$") #matches a variant search e.g. 22-549634966-AG-TT
gene_search = re.compile("^[A-Z][A-Z1-9]+$") #matches a string which looks like a gene name
if variant_search.match(search_query): #we have searched for a variant
variant_list = search_query.split('-')
chromosome = 'chr'+variant_list[0]
position = variant_list[1]
ref = variant_list[2]
alt = variant_list[3]
variant_hash = variant_utilities.get_variant_hash(chromosome,position,ref,alt)
try:
Variant.objects.get(variant_hash=variant_hash)
except:
return render(request, 'VariantDatabase/search.html' , {'error': True, 'form': form})
return redirect(view_detached_variant, variant_hash)
elif gene_search.match(search_query): #Looks like a gene
try:
gene = Gene.objects.get(name=search_query)
except:
return render(request, 'VariantDatabase/search.html' , {'error': True, 'form': form})
return redirect(view_gene, search_query)
else:
return render(request, 'VariantDatabase/search.html' , {'error': True, 'form': form})
else:
return render(request, 'VariantDatabase/search.html' , {'form': form})
#Under development
def api_variants(request):
"""
API for getting all variants
"""
if request.method == 'GET':
variants = Variant.objects.all()
serializer = VariantFreqSerializer(variants, many=True)
return JsonResponse(serializer.data, safe=False)
def additional_annotation(request, variant_sample_pk):
variant_sample = get_object_or_404(VariantSample, pk=variant_sample_pk)
variant = variant_sample.variant
chromosome = variant.chromosome[3:]
position = variant.position
ref = variant.ref
alt = variant.alt
mv = myvariant.MyVariantInfo()
q= 'chrom:'+chromosome + ' AND vcf.position:' + str(position) + ' AND vcf.ref:' + ref + ' AND vcf.alt:' + alt
#data = mv.query(q)
response = urllib2.urlopen('http://python.org/')
html = response.read()
return JsonResponse(html, safe=False)
|
[
"josephhalstead@gmail.com"
] |
josephhalstead@gmail.com
|
02fe70e3f0df2bf54bd7acc70a230e3ab1d39613
|
ebfbc3ef1ec356ac42a2e681ce71cd4b18bfd84e
|
/news/preprocess/epochtimes.py
|
e3262450404f99d0bce5dc4801cd6437d208fe0e
|
[] |
no_license
|
eric88525/Taiwan_news_dataset
|
e837ca61e1252bfc0138420e5f1128a611b7a1ae
|
5df326b75bbbc7bc785e7358231a10dacfc7f810
|
refs/heads/master
| 2023-09-03T19:35:08.918173
| 2021-07-17T07:15:54
| 2021-07-17T07:15:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
import re
import unicodedata
import dateutil.parser
from bs4 import BeautifulSoup
from news.db.schema import News
REPORTER_PATTERN = re.compile(r'\(大紀元記者(.*?)報導\)')
URL_PATTERN = re.compile(
r'https://www.epochtimes.com/b5/(\d+)/(\d+)/(\d+)/n\d+\.htm'
)
def parse(ori_news: News) -> News:
"""Parse Epochtimes news from raw HTML.
    Input news must contain `raw_xml` and `url`, since this
    information cannot be retrieved from `raw_xml` alone.
"""
# Information which cannot be parsed.
parsed_news = News(
# Minimize `raw_xml`.
raw_xml=re.sub(r'\s+', ' ', ori_news.raw_xml),
url=ori_news.url,
)
soup = None
try:
soup = BeautifulSoup(parsed_news.raw_xml, 'html.parser')
except Exception:
raise ValueError('Invalid html format.')
# News article.
article = ''
try:
article_tags = soup.select('div#artbody > p,h2')
article = ' '.join(map(lambda tag: tag.text.strip(), article_tags))
article = unicodedata.normalize('NFKC', article).strip()
except Exception:
raise ValueError('Fail to parse epochtimes news article.')
# News category.
category = ''
try:
category = soup.select('div#breadcrumb > a')[-1].text
category = unicodedata.normalize('NFKC', category).strip()
except Exception:
        # There may be no category.
category = ''
# News datetime.
news_datetime = ''
try:
match = URL_PATTERN.match(parsed_news.url)
year = int(match.group(1))
month = int(match.group(2))
day = int(match.group(3))
news_datetime = dateutil.parser.isoparse(
f"20{year:02d}-{month:02d}-{day:02d}T00:00:00Z"
)
news_datetime = news_datetime.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
news_datetime = unicodedata.normalize('NFKC', news_datetime)
except Exception:
        # There may be no datetime.
news_datetime = ''
# News reporter.
reporter = ''
try:
reporter = REPORTER_PATTERN.search(article).group(1)
except Exception:
        # There may be no reporter.
reporter = ''
# News title.
title = ''
try:
title = soup.select('h1.title')[0].text
title = unicodedata.normalize('NFKC', title).strip()
except Exception:
raise ValueError('Fail to parse epochtimes news title.')
parsed_news.article = article
parsed_news.category = category
parsed_news.company = '大紀元'
parsed_news.datetime = news_datetime
parsed_news.reporter = reporter
parsed_news.title = title
return parsed_news
|
[
"nail1021734@gmail.com"
] |
nail1021734@gmail.com
|
c4b3df7602aff044b0067eafd4393a20ebbd061b
|
931a9df55e40558593d3e2bd32c0fb339c64fdc3
|
/setup.py
|
3cbef67be4115f168a40e2c3c1e09012750b2263
|
[] |
no_license
|
ch-liuzhide/geogenius-python-sdk
|
23a06aad3ef42049b1743714c14e13ea6400c297
|
c74665dc5e1818b2c49eccb60175b1d741ec188b
|
refs/heads/master
| 2022-09-17T00:00:52.172414
| 2020-05-22T04:15:36
| 2020-05-22T04:15:36
| 268,464,270
| 1
| 0
| null | 2020-06-01T08:19:45
| 2020-06-01T08:19:45
| null |
UTF-8
|
Python
| false
| false
| 1,011
|
py
|
import os.path
import sys
from setuptools import setup, find_packages
open_kwds = {}
if sys.version_info > (3,):
open_kwds['encoding'] = 'utf-8'
profile = os.environ.get('GEOGENIUS_PROFILE', '')
if profile == '':
requires = []
else:
req_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "requirements.txt")
with open(req_path) as f:
requires = f.read().splitlines()
# with open('README.md', **open_kwds) as f:
# readme = f.read()
# long_description=readme,
setup(name='geogeniustools',
version='0.1',
description='API wrapper and imagery access for the Geogenius Platform',
classifiers=[],
keywords='',
author='Huawei Technology zhaoxianwei',
author_email='',
packages=find_packages(exclude=['docs', 'tests']),
include_package_data=True,
zip_safe=False,
install_requires=requires,
setup_requires=['pytest-runner'],
tests_require=['pytest', 'vcrpy']
)
|
[
"JIXF@jixiaofengdeMacBook-Pro.local"
] |
JIXF@jixiaofengdeMacBook-Pro.local
|
48f8090027d651dfdb073756c51c7555e7995946
|
b406e0e126083637f2f170ef2d7f1fe379ef5048
|
/jd/jd/jd/pipelines.py
|
2372a058664f21e14baaa22aa5598965788b62e4
|
[] |
no_license
|
junjunjuner/jingdong
|
62b879d1154d411eaefb67375fec262db1dce91d
|
5da6e30414d2848fa4c16ab5d8aeb90946987dc6
|
refs/heads/master
| 2021-08-29T22:48:53.592375
| 2017-12-15T06:23:38
| 2017-12-15T06:23:38
| 114,190,840
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,368
|
py
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
from jd.settings import FIELDS_TO_EXPORT
from scrapy import signals
from scrapy.exporters import CsvItemExporter
import time
class JdtestPipeline(object):
def process_item(self, item, spider):
return item
class CSVPipeline(object):
def __init__(self):
self.files = {}
@classmethod
def from_crawler(cls, crawler):
pipeline = cls()
crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
return pipeline
def spider_opened(self, spider):
file = open('jd电饭煲_%s.csv' % (self.printfNow()), 'wb')
# file = open('%s_pages_%s.csv' % (spider.name,self.printfNow()), 'a+b')
self.files[spider] = file
self.exporter = CsvItemExporter(file)
self.exporter.fields_to_export = FIELDS_TO_EXPORT
self.exporter.start_exporting()
def spider_closed(self, spider):
self.exporter.finish_exporting()
file = self.files.pop(spider)
file.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item
def printfNow(self):
return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
|
[
"523614801@qq.com"
] |
523614801@qq.com
|
edce723627f80b36638a902c1965bc81af5c50f4
|
63eb8b194590e444b0e80e1424dfc563023518dc
|
/vislab/datasets/__init__.py
|
d4e205656c48dded044f6bb81cd0c06de9ff549a
|
[] |
no_license
|
caomw/vislab
|
ba3e523c67662a85fb03418ce9ccd4b9f10ad484
|
bc04740bb685c81c184ea0d3f3a11f4e37787f36
|
refs/heads/master
| 2020-12-28T23:15:29.669446
| 2014-02-24T19:01:50
| 2014-02-24T19:01:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 75
|
py
|
import pascal
import wikipaintings
import ava
import flickr
import behance
|
[
"sergeykarayev@gmail.com"
] |
sergeykarayev@gmail.com
|
af780707d53503cc0c6f8b741168a08be67472fb
|
4f5f40a874a9ae17d25cb2fe8cac5fba327b5265
|
/examples/test_zephyr_philosophers.py
|
fd5739590bc4aec271c1df7a635b844c0bfa47c4
|
[
"Apache-2.0"
] |
permissive
|
dickeylim/tcf
|
4f580b507d381542f22c0899e4922add13f76926
|
4ecda0e1983fed2cb932242395a5be4754349534
|
refs/heads/master
| 2020-06-25T16:30:34.613115
| 2019-08-27T18:39:25
| 2019-08-27T18:39:25
| 199,366,410
| 0
| 0
|
Apache-2.0
| 2019-07-29T02:48:55
| 2019-07-29T02:48:54
| null |
UTF-8
|
Python
| false
| false
| 585
|
py
|
#! /usr/bin/python2
#
# Copyright (c) 2017 Intel Corporation
#
# SPDX-License-Identifier: Apache-2.0
#
import os
import re
import tcfl.tc
import tcfl.tl
@tcfl.tc.tags(**tcfl.tl.zephyr_tags())
# Ask for a target that defines an zephyr_board field, which indicates
# it can run the Zephyr OS
@tcfl.tc.target("zephyr_board",
app_zephyr = os.path.join(tcfl.tl.ZEPHYR_BASE,
"samples", "philosophers"))
class _test(tcfl.tc.tc_c):
@staticmethod
def eval(target):
target.expect(re.compile("Philosopher 5.*THINKING"))
|
[
"inaky.perez-gonzalez@intel.com"
] |
inaky.perez-gonzalez@intel.com
|
469ffb9dc230b3cf294d237dbf947e5f2890f37a
|
30997e4c4338a8dd5216d4bb07ea860293a33da1
|
/test/test_preferences.py
|
b7e54236ac563a87e09a493a0d686f97ab4dd010
|
[
"MIT"
] |
permissive
|
azumafuji/ulauncher-albert-calculate-anything
|
739a537ecd4921be20d01dc144be643141acc580
|
ee0903174c8b87cd1f7c3b6c1acef10702547507
|
refs/heads/master
| 2023-07-07T12:11:08.526109
| 2021-08-12T06:39:56
| 2021-08-12T06:39:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,203
|
py
|
from contextlib import contextmanager
from calculate_anything.currency.providers import (
CoinbaseCurrencyProvider,
MyCurrencyNetCurrencyProvider,
ECBCurrencyProvider,
FixerIOCurrencyProvider,
)
from calculate_anything.units import UnitsService
import pytest
from calculate_anything.currency import CurrencyService
from calculate_anything.lang import LanguageService
from calculate_anything.time import TimezoneService
from calculate_anything.preferences import Preferences
from test.tutils import reset_instance
@contextmanager
def mock_providers(mock_currency_provider):
klasses = [
CoinbaseCurrencyProvider,
FixerIOCurrencyProvider,
MyCurrencyNetCurrencyProvider,
ECBCurrencyProvider,
]
data = [{}, {}, {}, '']
use_json = [True, True, True, False]
with mock_currency_provider(klasses, data, use_json):
yield
def test_defaults(in_memory_cache, mock_currency_provider):
with reset_instance(
Preferences,
LanguageService,
TimezoneService,
UnitsService,
CurrencyService,
), in_memory_cache(), mock_providers(mock_currency_provider):
preferences = Preferences()
preferences.commit()
assert preferences.language.lang == LanguageService().lang == 'en_US'
assert (
preferences.time.default_cities == TimezoneService().default_cities
)
assert (
preferences.units.conversion_mode
== UnitsService()._conversion_mode
== UnitsService.ConversionMode.NORMAL
)
assert (
preferences.currency.cache_enabled
== CurrencyService().cache_enabled
is False
)
assert (
preferences.currency.cache_update_frequency
== CurrencyService()._cache._update_frequency
== 0
)
assert (
preferences.currency.default_currencies
== CurrencyService().default_currencies
== []
)
assert sorted(map(str, preferences.currency.providers)) == sorted(
map(
str,
(
ECBCurrencyProvider,
MyCurrencyNetCurrencyProvider,
CoinbaseCurrencyProvider,
),
)
)
assert CurrencyService()._is_running is False
CurrencyService().stop()
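# Two specs exercising different input formats (raw strings vs. already-typed objects); the assertions below expect the same resulting state for both.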
test_spec_normal_alts = [
{
'language': {'lang': 'en_US'},
'time': {
'default_cities': 'Athens GR, New York City US',
},
'units': {
'conversion_mode': 'crazy',
},
'currency': {
'cache_frequency': 100000,
'providers': [
('fixerIO', '01010'),
('fixerIO', 'asasd'),
('fixerIO', 'some'),
('fixerIO', 'value'),
('fixerIO', '12345'),
],
'default_currencies': 'eur, BTC, usd, RON',
},
},
{
'language': {'lang': 'en_US'},
'time': {
'default_cities': ['Athens GR', 'New York City US'],
},
'units': {
'conversion_mode': UnitsService.ConversionMode.CRAZY,
},
'currency': {
'cache_frequency': '100000',
'providers': [
(FixerIOCurrencyProvider(api_key='00001'), ''),
(FixerIOCurrencyProvider(api_key='00002'), ''),
(FixerIOCurrencyProvider(api_key='12345'), ''),
],
'default_currencies': ['EUR', 'btc', 'USD', 'ron'],
},
},
]
@pytest.mark.parametrize('test_spec', test_spec_normal_alts)
def test_normal(test_spec, in_memory_cache, mock_currency_provider):
with reset_instance(
Preferences,
LanguageService,
TimezoneService,
UnitsService,
CurrencyService,
), in_memory_cache(), mock_providers(mock_currency_provider):
lang = test_spec['language']['lang']
default_cities = test_spec['time']['default_cities']
units_conversion_mode = test_spec['units']['conversion_mode']
cache_frequency = test_spec['currency']['cache_frequency']
currency_providers = test_spec['currency']['providers']
default_currencies = test_spec['currency']['default_currencies']
preferences = Preferences()
preferences.language.set(lang)
preferences.time.set_default_cities(default_cities)
preferences.units.set_conversion_mode(units_conversion_mode)
preferences.currency.enable_cache(cache_frequency)
for provider, api_key in currency_providers:
preferences.currency.add_provider(provider, api_key)
preferences.currency.set_default_currencies(default_currencies)
preferences.commit()
assert preferences.language.lang == LanguageService().lang == 'en_US'
assert (
preferences.time.default_cities == TimezoneService().default_cities
)
default_cities = preferences.time.default_cities
default_cities = [
{k: d[k] for k in ['name', 'country', 'cc', 'timezone']}
for d in default_cities
]
default_cities = map(dict.items, default_cities)
default_cities = sorted(default_cities)
default_cities_expected = [
{
'name': 'Athens',
'country': 'Greece',
'cc': 'GR',
'timezone': 'Europe/Athens',
},
{
'name': 'New York City',
'country': 'United States',
'cc': 'US',
'timezone': 'America/New_York',
},
]
default_cities_expected = map(dict.items, default_cities_expected)
default_cities_expected = sorted(default_cities_expected)
assert default_cities == default_cities_expected
assert (
preferences.units.conversion_mode
== UnitsService()._conversion_mode
== UnitsService.ConversionMode.CRAZY
)
assert (
preferences.currency.cache_enabled
== CurrencyService().cache_enabled
is True
)
assert (
preferences.currency.cache_update_frequency
== CurrencyService()._cache._update_frequency
== 100000
)
assert (
preferences.currency.default_currencies
== CurrencyService().default_currencies
== ['EUR', 'BTC', 'USD', 'RON']
)
assert sorted(map(str, preferences.currency.providers)) == sorted(
map(
str,
(
ECBCurrencyProvider,
MyCurrencyNetCurrencyProvider,
CoinbaseCurrencyProvider,
FixerIOCurrencyProvider,
),
)
)
fixerio = FixerIOCurrencyProvider
api_key = CurrencyService()._provider._api_providers[fixerio]._api_key
assert api_key == '12345'
assert CurrencyService().is_running is True
CurrencyService().stop()
|
[
"tilemachos.charalampous@gmail.com"
] |
tilemachos.charalampous@gmail.com
|
1e05cbb73c5027fc77895f2305d2b92435f132bd
|
d78dfc5089717fc242bbd7097f507d811abb4260
|
/Australian/plugin.audio.xmanradio/default.py
|
63359c01e88a80070c20c23d0240592a259e357e
|
[] |
no_license
|
tustxk/AddOnRepo
|
995b980a9ec737e2c25bed423fc83f710c697e40
|
6b86a06cb37e6e10b4119584dd7311ebc2318e54
|
refs/heads/master
| 2022-10-08T21:34:34.632346
| 2016-10-28T09:48:01
| 2016-10-28T09:48:01
| 70,684,775
| 1
| 1
| null | 2022-10-01T16:27:13
| 2016-10-12T09:31:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,570
|
py
|
import sys  # needed for the add-on handle and query string in sys.argv
import urllib, urllib2, re
import xbmcplugin, xbmcgui
def CATEGORIES():
addLink('2GB Sydney Talk Radio','http://shoutcast.2gb.com/listen.pls?sid=1','http://www.2gb.com/sites/all/themes/two_gb/logo.png')
addLink('ABC Radio Australia','http://shoutmedia.abc.net.au:10442/','http://d1i6vahw24eb07.cloudfront.net/s25557q.png')
addLink('702 ABC Sydney','http://shoutmedia.abc.net.au:10436/','http://d1i6vahw24eb07.cloudfront.net/s9118q.png')
addLink('Aussie: All australian all the time','mms://winstream.sportalhosting.com/Aussie','http://d1i6vahw24eb07.cloudfront.net/s148654q.png')
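# Parse the query string Kodi passes in sys.argv[2] into a dict of parameters.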
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
def addLink(name,url,iconimage):
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz)
return ok
def addDir(name,url,mode,iconimage):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
params=get_params()
url=None
name=None
mode=None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
print "Mode: "+str(mode)
print "URL: "+str(url)
print "Name: "+str(name)
if mode==None or url==None or len(url)<1:
print ""
CATEGORIES()
xbmcplugin.endOfDirectory(int(sys.argv[1]))
|
[
"ke.xiao@netxeon.com"
] |
ke.xiao@netxeon.com
|
0cb7b6bf8d1f2e68c82b658c9e3017c637000e3f
|
760077feec25e70eff09fac5950e9e9dfb079de3
|
/main.py
|
987b7597855f1cf4819e20a6621d5c7aa324a759
|
[] |
no_license
|
leonidas/flask-spa-routing-example
|
2f9a0980361abf6f2ea2b37b5a44d92cb5f18d9d
|
a41304b9c54fb12190bb090aaa81b8f546400315
|
refs/heads/master
| 2021-01-10T08:36:57.287257
| 2016-01-11T10:48:29
| 2016-01-11T10:48:29
| 49,418,239
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,157
|
py
|
#!/usr/bin/env python
from __future__ import print_function
from functools import wraps
from flask import Flask, send_from_directory, redirect
app = Flask(__name__, static_folder=None) # disable default static file serving
app.debug = True
def logged_in():
return True # change this to False to emulate non-logged-in users
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if not logged_in():
return redirect('/login')
return f(*args, **kwargs)
return decorated_function
@app.route("/api/v1/hello")
@login_required
def hello():
return "Hello World!"
# in production, these are served by nginx
if app.debug:
@app.route('/login')
def login_page():
return send_from_directory('static', 'login.html')
@app.route('/', defaults={'path': 'index.html'})
@app.route("/<path:path>")
@login_required
def static_file(path):
return send_from_directory('static', path)
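# Any URL that does not match a static file falls back to index.html so the single-page app can handle routing client-side.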
@app.errorhandler(404)
@login_required
def send_index(error):
return send_from_directory('static', 'index.html')
if __name__ == "__main__":
app.run()
|
[
"santtu@pajukanta.fi"
] |
santtu@pajukanta.fi
|
41cb367b316c5bb963352f90c9f4d50259ce82c1
|
58f57a9e6a0052cf0e9f80bd46d0b68b656f14a4
|
/calplus/conf/compute.py
|
0efc397471690fbbd2d18b6f869aedf3b299470c
|
[
"Apache-2.0"
] |
permissive
|
HPCC-Cloud-Computing/CALplus
|
43d9a73bd0d7e45c6be865eacbbf6a40cb42aa7e
|
82542aa8aa083af8b09b8178a1385a9e23bc4f68
|
refs/heads/master
| 2020-04-09T05:23:08.196563
| 2018-12-04T06:45:46
| 2018-12-04T06:45:46
| 160,062,194
| 0
| 1
|
Apache-2.0
| 2018-12-04T06:45:47
| 2018-12-02T15:37:50
| null |
UTF-8
|
Python
| false
| false
| 490
|
py
|
from oslo_config import cfg
compute_group = cfg.OptGroup('compute',
title='Compute Options')
# some config options here
driver_path = cfg.StrOpt(
'driver_path',
default='calplus.v1.compute.drivers',
help='Default path to compute drivers',
)
ALL_OPTS = [driver_path]
def register_opts(conf):
conf.register_group(compute_group)
conf.register_opts(ALL_OPTS, group=compute_group)
def list_opts():
return {compute_group: ALL_OPTS}
|
[
"nghia.duontrung16@gmail.com"
] |
nghia.duontrung16@gmail.com
|
55a7741ede2a93c7fdb436e43848d3120a4d5851
|
4aa7a4d0525095725eb99843c83827ba4806ceb1
|
/ML/m04_xor4_keras.py
|
0c6c7136a58e809cea8cedab9bf03c489f5836e1
|
[] |
no_license
|
seonukim/Study
|
65a70f5bdfad68f643abc3086d5c7484bb2439d4
|
a5f2538f9ae8b5fc93b5149dd51704e8881f0a80
|
refs/heads/master
| 2022-12-04T17:04:31.489771
| 2020-08-21T00:35:15
| 2020-08-21T00:35:15
| 260,144,755
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,144
|
py
|
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from sklearn.svm import LinearSVC, SVC
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
# 1. Data
x_data = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
y_data = np.array([0, 1, 1, 0])
print(x_data.shape)
print(y_data.shape)
# 2. Model
# model = LinearSVC()
# model = SVC()
# model = KNeighborsClassifier(n_neighbors = 1)
model = Sequential()
model.add(Dense(1000, activation = 'relu', input_shape = (2, )))  # a nonlinear hidden activation is required; without it the model reduces to logistic regression and cannot fit XOR
model.add(Dense(1, activation = 'sigmoid'))
model.summary()
# 3. Train
model.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics = ['acc'])
model.fit(x_data, y_data, epochs = 100, batch_size = 1)
# 4. Evaluate and predict
res = model.evaluate(x_data, y_data)
x_test = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
y_predict = model.predict(x_test)
# acc = accuracy_score([0, 1, 1, 0], y_predict)  # accuracy_score can be treated as equivalent to Keras' evaluate
# print(x_test, "prediction result : ", y_predict)
# print("acc : ", acc)
print("acc : ", res[1])
print(y_predict)
|
[
"92.seoonooo@gmail.com"
] |
92.seoonooo@gmail.com
|
9306b9a533b5b5ec86432111a1786e7d22879193
|
fcaf9c408e1ec8c1a6fd94c98126f22a05678cb1
|
/Array_Sequence/largestContinuousSum.py
|
e529b3e88dcb49af3f8ce0d789d7b36d5087693e
|
[] |
no_license
|
rishabh-in/DataStructure-Python
|
65908526e6d06b76aa369c31df8a3fc3c9e06b6d
|
1df607678f72de6b4b712fc6aadc5fd5e2890c9a
|
refs/heads/master
| 2022-11-22T21:26:19.546266
| 2020-07-29T08:18:28
| 2020-07-29T08:18:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
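# Kadane's algorithm: track the best sum of a subarray ending at the current element and the best sum seen overall.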
def larg_cont_sum(arr):
if len(arr) == 0:
return 0
max_Sum = current_Sum = arr[0]
for num in arr[1:]:
current_Sum = max(current_Sum + num, num)
max_Sum = max(current_Sum, max_Sum)
return max_Sum
result = larg_cont_sum([1, 2, -1, 3, 4, 10, 10, -10, -1])
print(result)
|
[
"noreply@github.com"
] |
rishabh-in.noreply@github.com
|