from django.contrib import admin
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
from django.core.urlresolvers import reverse
from django.utils.html import escape
from django import forms
from django.forms.models import inlineformset_factory, BaseInlineFormSet
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import ReadOnlyPasswordHashField
from journal import models as app_models
# User admin:
# TODO: organization list in list_display
# TODO: article count (published, new etc)
if Group in admin.site._registry:
admin.site.unregister(Group)
class JournalAdmin(admin.ModelAdmin):
class Media:
js = ('admin/js/jquery.init-global.js',
'js/jquery.autosize.min.js',
'admin/js/misc.js')
class SectionNameInline(admin.TabularInline):
model = app_models.SectionName
extra = len(settings.LANGUAGES)
max_num = len(settings.LANGUAGES)
class SectionAdmin(JournalAdmin):
raw_id_fields = ['moderators']
list_display = ('__unicode__', 'display_moderators', 'articles_count')
search_fields = ['sectionname__name']
inlines = [SectionNameInline]
def display_moderators(self, obj=None):
if obj:
return u', '.join(map(unicode, obj.moderators.all()))
return u''
display_moderators.short_description = _(u'Moderators')
def articles_count(self, obj=None):
if obj:
return obj.article_set.all().count()
return u''
articles_count.short_description = _(u'Articles')
# TODO: check staff membership for moderators, make select with only valid choices
# TODO: count all published, all in moderation separately
# TODO: pending reviews
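# One possible approach to the staff-membership TODO (a sketch, not wired in):
# restrict the moderators selection to staff users via formfield_for_manytomany, e.g.
#
#   def formfield_for_manytomany(self, db_field, request, **kwargs):
#       if db_field.name == 'moderators':
#           kwargs['queryset'] = get_user_model().objects.filter(is_staff=True)
#       return super(SectionAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)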
class OrganizationLocalizedContentInline(admin.StackedInline):
model = app_models.OrganizationLocalizedContent
extra = len(settings.LANGUAGES)
max_num = len(settings.LANGUAGES)
class OrganizationAdmin(JournalAdmin):
inlines = [OrganizationLocalizedContentInline]
list_display = ('__unicode__', 'short_name', 'moderation_status', 'obsolete', 'display_site')
list_filter = ('moderation_status', 'obsolete')
search_fields = ('organizationlocalizedcontent__name', 'alt_names', 'short_name',
'previous__organizationlocalizedcontent__name', 'previous__alt_names')
raw_id_fields = ['previous']
def display_site(self, obj=None):
if obj and obj.site:
return mark_safe(u'<a href="%s" target="_blank">%s</a>' % (escape(obj.site), escape(obj.site)))
return u''
display_site.short_description = _(u'Site')
display_site.admin_order_field = 'site'
class ArticleSourceInline(admin.TabularInline):
model = app_models.ArticleSource
extra = 0
class ArticleResolutionInline(admin.TabularInline):
model = app_models.ArticleResolution
extra = 0
raw_id_fields = ['reviews']
class ArticleAuthorInline(admin.TabularInline):
model = app_models.ArticleAuthor
extra = 0
raw_id_fields = ('user', 'organization')
class ArticleAttachInline(admin.TabularInline):
model = app_models.ArticleAttach
extra = 0
class LocalizedArticleContentInline(admin.StackedInline):
model = app_models.LocalizedArticleContent
extra = len(settings.LANGUAGES)
max_num = len(settings.LANGUAGES)
class ReviewInline(admin.StackedInline):
model = app_models.Review
extra = 0
fields = ('reviewer', 'status', 'date_created', 'comment_for_authors', 'comment_for_editors', 'resolution', 'render')
readonly_fields = ('render', 'date_created')
view_on_site = False
class ArticleAdmin(JournalAdmin):
search_fields = ('old_number', 'localizedarticlecontent__title', 'localizedarticlecontent__abstract',
'localizedarticlecontent__references', 'articleauthor__user__localizedname__last_name')
list_filter = ('status', 'type', 'issue', 'sections')
list_display = ('display_title', 'id', 'status', 'type', 'issue', 'date_published', 'date_in', 'display_authors', 'display_reviews')
inlines = (LocalizedArticleContentInline, ArticleAuthorInline, ArticleSourceInline, ArticleAttachInline, ReviewInline, ArticleResolutionInline)
raw_id_fields = ['senders']
readonly_fields = ['article_link']
fieldsets = (
(None, {'fields': (('status', 'article_link'), 'doi', 'issue', ('date_in', 'date_published'), 'old_number')}),
(None, {'fields': ('content', )}),
(None, {'fields': ('senders', 'image', 'type', 'lang', 'report', 'sections')}),
)
def formfield_for_manytomany(self, db_field, request, **kwargs):
if db_field.name == "sections":
kwargs['widget'] = forms.CheckboxSelectMultiple
return super(ArticleAdmin, self).formfield_for_manytomany(db_field, request, **kwargs)
def article_link(self, obj):
if obj.issue:
return settings.SITE_URL + obj.get_absolute_url()
else:
return u''
article_link.short_description = _(u'Article link')
def display_title(self, obj=None, max_length=64):
if not obj:
return u''
if len(obj.title) > max_length:
return unicode(obj)[:max_length-4].rstrip() + '...'
return unicode(obj)
display_title.short_description = _(u'Title')
def display_authors(self, obj=None):
if not obj:
return u''
out = []
for user, orgs in obj.get_authors().iteritems():
out.append(u'<a href="%s" target="_blank">%s</a>' % (
reverse('admin:journal_journaluser_change', args=[user.id]), user.str_compact()))
return mark_safe(u', '.join(out))
display_authors.short_description = _(u'Authors')
def display_reviews(self, obj=None):
if not obj:
return u''
out = []
for review in obj.review_set.all():
out.append(u'%s: %s' % (
review.reviewer.str_compact(),
u'%s - %s' % (review.get_status_display(),
review.get_resolution_display()) if review.resolution else review.get_status_display()))
return mark_safe(u'<br />'.join(out))
display_reviews.short_description = _(u'Reviews')
class ReviewFieldAdmin(JournalAdmin):
list_display = ('name', 'field_type')
class LocalizedIssueContentInline(admin.StackedInline):
model = app_models.LocalizedIssueContent
max_num = len(settings.LANGUAGES)
extra = len(settings.LANGUAGES)
fields = ('lang', 'title', 'description')
class IssueAdmin(JournalAdmin):
inlines = (LocalizedIssueContentInline, )
list_display = ('__unicode__', 'is_active', 'articles_count')
def articles_count(self, obj=None):
if obj:
return obj.article_set.all().count()
return u''
articles_count.short_description = _(u'Articles')
class LocalizedNameInline(admin.TabularInline):
model = app_models.LocalizedName
extra = len(settings.LANGUAGES)
max_num = len(settings.LANGUAGES)
class StaffMemberInline(admin.StackedInline):
extra = 0
model = app_models.StaffMember
max_num = 1
ordering = ['id']
class JournalUserChangeForm(forms.ModelForm):
password = ReadOnlyPasswordHashField(label=_("Password"),
help_text=_("Raw passwords are not stored, so there is no way to see "
"this user's password, but you can change the password "
"using <a href=\"password/\">this form</a>."))
class Meta:
model = app_models.JournalUser
fields = '__all__'
def __init__(self, *args, **kwargs):
super(JournalUserChangeForm, self).__init__(*args, **kwargs)
f = self.fields.get('user_permissions', None)
if f is not None:
f.queryset = f.queryset.select_related('content_type')
def clean_password(self):
return self.initial["password"]
class PositionInOrganizationInline(admin.TabularInline):
model = app_models.PositionInOrganization
extra = 0
raw_id_fields = ('organization', )
class JournalUserAdmin(UserAdmin):
form = JournalUserChangeForm
add_form = forms.ModelForm
inlines = (StaffMemberInline, LocalizedNameInline, PositionInOrganizationInline)
fieldsets = (
(None, {'fields': ('email', 'password')}),
(_('Permissions'), {'fields': ('moderation_status', 'is_active', 'is_staff', 'is_superuser')}),
(_('Profile'), {'fields': ('degree', )}),
(_('Important dates'), {'fields': ('last_login', 'date_joined')}),
)
add_fieldsets = (
(None, {
'classes': ('wide', ),
'fields': ('email',),
}),
)
readonly_fields = ('last_login', 'date_joined')
list_display = ('__unicode__', 'email', 'is_active', 'moderation_status', 'is_staff')
list_filter = ('is_staff', 'is_active', 'moderation_status', 'staffmember__editor')
search_fields = ('localizedname__first_name', 'localizedname__last_name', 'email')
ordering = None # handled inside get_queryset
def get_formsets_with_inlines(self, request, obj=None):
if obj is None:
return ()
return super(JournalUserAdmin, self).get_formsets_with_inlines(request, obj)
def get_queryset(self, request):
from django.db import models
qs = self.model._default_manager.get_queryset()
# For ordering by localizedname__last_name
ordering = self.get_ordering(request) + ('localizedname__last_name__max', )
qs = qs.annotate(models.Max('localizedname__last_name')).order_by(*ordering).distinct()
return qs
admin.site.register(app_models.Organization, OrganizationAdmin)
admin.site.register(app_models.Section, SectionAdmin)
admin.site.register(app_models.Article, ArticleAdmin)
admin.site.register(app_models.ReviewField, ReviewFieldAdmin)
admin.site.register(app_models.Issue, IssueAdmin)
admin.site.register(app_models.JournalUser, JournalUserAdmin)
|
import click
def yellow(message):
return click.style(message, fg="yellow", bold=True)
def red(message):
return click.style(message, fg="red", bold=True)
def green(message):
return click.style(message, fg="green", bold=True)
def gray(message):
return click.style(message, fg="white", bold=False)
def white(message):
return click.style(message, fg="white", bold=True)
def prompt(message):
return click.style(message, fg="green")
def default(message):
return white(message)
def blue(message):
return click.style(message, fg="blue", bold=True)
def format_command(a, b="", prefix=""):
return white(prefix) + blue("%s: " % a) + white(b)
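# A small usage sketch (hypothetical command name and messages; click.echo renders
# the ANSI styles, or strips them when output is not a terminal):
if __name__ == "__main__":
    click.echo(format_command("build", "assembling artifacts", prefix="> "))
    click.echo(red("error: ") + gray("see the log for details"))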
|
# -*- coding: utf-8 -*-
# Read two integers and print their sum in the "SOMA = X" format.
value1 = int(input())
value2 = int(input())
soma = value1 + value2
print("SOMA = " + str(soma))
|
from datetime import date
import boundaries
boundaries.register('Grimsby wards',
domain='Grimsby, ON',
last_updated=date(2016, 1, 1),
name_func=lambda f: 'Ward %s' % f.get('Ward'),
id_func=boundaries.attr('Ward'),
authority='Town of Grimsby',
licence_url='https://niagaraopendata.ca/pages/open-government-license-2-0-grimsby',
encoding='iso-8859-1',
extra={'division_id': 'ocd-division/country:ca/csd:3526065'},
)
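# For reference (hypothetical record): a feature whose 'Ward' attribute is '3' would be
# registered with name 'Ward 3' (via name_func) and a boundary id taken from the same
# 'Ward' attribute (via boundaries.attr('Ward')).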
|
# -*- coding: utf-8 -*-
import re
import glob
from geotext import GeoText
#ES: error tracking code for py 2 -------------
import logging
import time
import os
#----------------------------------------------
def compileSubs(folderName,fileName,files,t_list,interviewer,interviewee,pass2,language,resampleSubtitles,removeStamps,removeLoneWords):
#files = [['_High_freq_timestamping',0,False]]
for fil in files:
#fil = ['name',60,False]
#print fil
min_timelapse_between_stamps = fil[1]
place_based_timestamping = fil[2]
if place_based_timestamping:
min_timelapse_between_stamps = 10000000000000
list_false_positives = ['David','Un','Mon']
#ES: LIST OF SUBTITLE FILE SNIPPETS
# Find all sub in the folder
sub_files = glob.glob(folderName + '/*.vtt')
#print sub_files
#ES: list of NEW subtitles file elements
new_sub = ["WEBVTT\nKind: captions\nLanguage: "+str(language)]
c = 0
t_list_pos = 0
#ES: USEFUL FUNC PROBABLY
def hms_to_s(time):
time = unicode(time, "UTF-8")
time = time.split(" --> ")
#print time
#print "--------"
try:
t_0 = time[0].split(":")
t_1 = time[1].split(":")
except IndexError as e:
print e
print "var 'time' in function 'hms_to_s' not valid"
print t_0
print t_1
exit()
t0 = float(t_0[0])*3600 + float(t_0[1])*60 + float(t_0[2])
t1 = float(t_1[0])*3600 + float(t_1[1])*60 + float(t_1[2])
#return [int(t0),int(t1)]
#print t0,t1
return [t0,t1]
def s_to_hms(seconds):
#print seconds
m, sec = divmod(seconds, 60)
h, m = divmod(m, 60)
#print str(int(h)) + ":" + str(int(m)) + ":" + str(int(s))
#return str(int(h)) + ":" + str(int(m)) + ":" + str(int(sec))
return str(int(h)) + ":" + str(int(m)) + ":" + str(float("{0:.2f}".format(round(sec,2))))
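# Quick sanity check of the two helpers above (hypothetical cue, not from the data):
#   hms_to_s("0:01:05.50 --> 0:01:08.00")  ->  [65.5, 68.0]
#   s_to_hms(65.5)                          ->  "0:1:5.5"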
last = [0,0]
offset = last
new_line = True
sentence = []
ee = ""
first_pass = True
last_timestamp = 0
cc = 0
ccc = -1
sub_times = []#ES: new final subtitles file list of timestamps
sub_text = []#ES: new final subtitles file list of text elements
#ES: custom list for the last timestamp of the prev vid (table_sentenses). this way we keep RG's resampling while maintaining original final timestamp of youtube-created timestamps.
last_t22 = []
#FUNCTION process-transform-aggregate-snippets
z = 0
#ES: FOR EACH SNIPPET IN LIST OF .VTT SNIPPETS
for s in sub_files:
#print ""
#ES: GO
if (pass2 == False and s != folderName + "/" + fileName + ".vtt") or (pass2 == True and "pass" in s):
print ""
print s
c += 1
#FUNCTION extract-to-table_sentenses
#ES: text READLINES IN .VTT FILES
with open(s) as f:
text = f.readlines()
text = [x.strip() for x in text]
table_sentenses = []
row = []
whereAreWe = s
#ES: for line in snippet
for t in text:
count = 0
#ES: clean snippet line
while count < 3:
try:
t = t.replace('\n','')
from chardet import detect
encoding = lambda x: detect(x)['encoding']
#print t,encoding(t)
if ' ' in t:
t = t.replace(' ','')
#print t
if '…' in t:
t = t.replace('…','...')
#print t
#ES: if line is not Nothing
if t != "":
#print t,t != "" and t[0].isdigit() and ":" in t,t != "" and (not t[0].isdigit() and ":" not in t) and "WEBVTT" not in t and "Kind" not in t and "Language" not in t
#ES: if first element in line is a digit and there is a colon in that line
if t != "" and t[0].isdigit() and t[2] is ':':
#print t
if row != []:
table_sentenses.append(row)
row = []
#ES: t needs to be turned into numbers in the current format and be part of a list in the format of text3, so that the resulting list can be put through the 'for t in text3' loop below
#ES: last_t is t transformed into seconds (numbers)
last_t = hms_to_s(t)
row.append(last_t)
if t != "" and (t[0].isdigit() == False or t[2] is not ":") and "WEBVTT" not in t and "Kind" not in t and "Language" not in t:
t = t.replace(" ","")
t = t.replace("\xc2\xab...\xc2\xbb","[Pause]")
t = t.replace(" "," ")
t = t.replace(". ",".&&&")
#t = t.replace(' "','"')
#t = t.replace('" ','"')
t = t.replace("‘’",'"')
#t = t.replace("[Pause]","[Pause]&&&")
t = t.replace("[Pause]","[...]")
t = t.replace("? ","?&&&")
#ES: adding conditional statements to preserve full functionality when user inputs no interviewer/ee variables.
if interviewer != "":
t = t.replace(interviewer,"&&&" + interviewer)
if interviewee != "":
t = t.replace(interviewee,"&&&" + interviewee)
#ES: split t after periods or before person names (interviewee/interviewer).
sentences = t.split("&&&")
#print sentences
for s in sentences:
if interviewer != "":
if s != '' and s != interviewer + ": ":
row.append(s.strip().replace(" "," "))# + ".")
else:
if s != '':
row.append(s.strip().replace(" "," "))
#print row
#print ""
except Exception as ex:
logging.exception('full stack trace:')
print "--------"
print "retrying to read t from ", whereAreWe, " - try #", count+1
print "will retry 3 times. If unsuccesful, you will encounter an error. If so, run 'main.py' again with only 'combineSubtitles' set to True and it should work"
count += 1
time.sleep(8)
continue
break
#END-FUNCTION extract-to-table_sentenses
#ES: make sure the last line in the snippet ends with punctuation (append a period if it does not)
try:
lastChar = row[len(row)-1][len(row[len(row)-1])-1]
except IndexError:
print "IndexError was handled for var 'row': ", row
print "Using len(row)-2 instead..."
lastChar = row[len(row)-2][len(row[len(row)-2])-1]
if lastChar != "." and lastChar != "," and lastChar != "?" and lastChar != "!":
row[len(row)-1] += "."
#ES: identifies final timestamp and appends to list of snippet sentences.
table_sentenses.append(row)
last_t2 = [last_t[1],last_t[1]]
#ES: last_t22 is a list of raw final timestamps that are used as offsets (i added this to fix the lagging concatenated timestamps)
last_t22.append([last_t[1],last_t[1]])
if len(last_t22) > 1:
z += 1
last_t22[z] = [a+b for a, b in zip(last_t22[z], last_t22[z-1])]
table_sentenses.append([last_t2])
"""
print "printing table_sentenses"
print "======================="
print table_sentenses
"""
#FUNCTION table_sentenses-to-text2
#ES: this is the code that is RESAMPLING and remixing the text...
#ES: should this be ignored?
if resampleSubtitles == True:
resampled_text = []
for ts in range(len(table_sentenses)):
#print table_sentenses[ts]
previousInterpolated = [0,0]
#print table_sentenses[ts]
if len(table_sentenses[ts]) > 0:
listOfElements = []
prev_ts_2 = 0
for ts_2 in table_sentenses[ts]:
if isinstance(ts_2, list) == False:
listOfElements.append([prev_ts_2,len(ts_2) + prev_ts_2])
prev_ts_2 += len(ts_2)
c_ts2 = 0
for ts_2 in table_sentenses[ts]:
if isinstance(ts_2, list) == False:
interpolated = len(ts_2)
maximum = listOfElements[len(listOfElements)-1][1]
#print ts_2,table_sentenses[ts][0]
#print c_ts2,table_sentenses[ts][0],(float(listOfElements[c_ts2][0]) / float(maximum)) * (table_sentenses[ts][0][1] - table_sentenses[ts][0][0]) + table_sentenses[ts][0][0]
#(10/100)*500+20
interpolated = [(float(listOfElements[c_ts2][0]) / float(maximum)) * (table_sentenses[ts][0][1] - table_sentenses[ts][0][0]) + table_sentenses[ts][0][0],0]
c_ts2 += 1
resampled_text.append(interpolated)
resampled_text.append(ts_2)
#else:
#resampled_text.append(ts_2)
#print ""
# for tts in range(len(table_sentenses[ts])):
# if (str(table_sentenses[ts][tts][0]).isalpha() or table_sentenses[ts][tts][0] == '[' or table_sentenses[ts][tts][0] == 'à' or table_sentenses[ts][tts][0] == 'é') and (str(table_sentenses[ts][tts-1][0]).isalpha() or table_sentenses[ts][tts-1][0] == '[' or table_sentenses[ts][tts-1][0] == 'à' or table_sentenses[ts][tts-1][0] == 'é'):
# try:
# interpolated = [float(previous_time[0])+(((table_sentenses[ts+1][0][0]-float(previous_time[0]))/(len(table_sentenses[ts][1])+len(table_sentenses[ts][2])))*len(table_sentenses[ts][1])),0]
# previousInterpolated = interpolated #- moins
# except:
# interpolated = [(float(previous_time[0]) + float(previous_time[1])) / 2.0,0]
# #previousInterpolated = interpolated
# interpolated = [float("{0:.2f}".format(round(interpolated[0],2))),0]
# resampled_text.append(interpolated)
# resampled_text.append(table_sentenses[ts][tts])
# else:
# if isinstance(table_sentenses[ts][tts][0], int) or isinstance(table_sentenses[ts][tts][0], float):
# previous_time = table_sentenses[ts][tts]
# resampled_text.append(table_sentenses[ts][tts])
# else:
# resampled_text.append(table_sentenses[ts][tts])
#for iiiii in resampled_text:
# print iiiii
#print resampled_text[len(resampled_text)-1]
text2 = [resampled_text[0]]
# Retirer les timestamps qui coupent les phrases
#ES: the current conditional loop is necessary where the previous process is applied, and should therefore be removed along with it
if removeStamps == True:
for rt in range(len(resampled_text)-1):
passAnyway = False
if ((isinstance(resampled_text[rt][0], int) == False and isinstance(resampled_text[rt][0], float) == False) and resampled_text[rt][len(resampled_text[rt])-1] != "." and resampled_text[rt][len(resampled_text[rt])-1] != "!" and resampled_text[rt][len(resampled_text[rt])-1] != "?"):
rien = 0
if (isinstance(resampled_text[rt+1][0], int) == False and isinstance(resampled_text[rt+1][0], float) == False):
#print "---------",resampled_text[rt+1]
passAnyway = True
else:
#print resampled_text[rt+1]
text2.append(resampled_text[rt+1])
if passAnyway == True:
#print resampled_text[rt+1]
text2.append(resampled_text[rt+1])
else:
for rt in range(len(resampled_text)-1):
text2.append(resampled_text[rt+1])
#ES: forget text2, same as table_sentenses but resampled and reformatted slightly
# [[41.99, 43.44], 'grande famille de six enfants; plus Papa et', 'Maman \xc3\xa7a fait huit.', 'Et on est tous ici au'], [[43.44, 48.4], 'Qu\xc3\xa9bec.', 'S.I.:: Et quel est votre \xc3\xa9tat matrimonial?'], [[48.4, 52.92], 'A.M.::Pas mari\xc3\xa9e.', 'S.I.:: Pas mari\xc3\xa9e, O.K.', 'Vous avez dit que'],
#to
#[43.0775, 0], 'Et on est tous ici au', [43.44, 0], 'Qu\xc3\xa9bec.', [44.218039215686275, 0], 'S.I.:: Et quel est votre \xc3\xa9tat matrimonial?', [48.4, 0], 'A.M.::Pas mari\xc3\xa9e.', [49.77898305084746, 0], 'S.I.:: Pas mari\xc3\xa9e, O.K.', [51.617627118644066, 0], 'Vous avez dit que',
#END-FUNCTION table_sentenses-to-text2
"""
print "printing text2"
print "======================="
print text2
"""
#ES: new interpolated 'smart' times (which need removing)
#FUNCTION reformat-text2-as-vtt-list/text3
text3 = ['WEBVTT','Kind: captions','Language: '+str(language),'']
for t2 in text2:
#print t2
if isinstance(t2[0], int):
t2[0],t2[1] = float(t2[0]),float(t2[1])
if isinstance(t2[0], float):
for aa in text2:
#print 2,aa
if (isinstance(aa[0], float) or isinstance(aa[0], int)) and aa[0] > t2[0]:
#print 2,aa
break
if not isinstance(aa[0],str):
t2[1] = aa[0]
else:
t2[1] = t2[0] + 3.0
#print 3,t2
#t2 = [s_to_hms(t2[0]),s_to_hms(t2[1])]
#print s_to_hms(t2[0])#.split(':')
#print type(t2[1])
#ES: function for turning time list into srt format timestamp
t2 = str(s_to_hms(t2[0]).split(':')[0]) + ":" + str(s_to_hms(t2[0]).split(':')[1]) + ":" + str(s_to_hms(t2[0]).split(':')[2]) + " --> " + str(s_to_hms(t2[1]).split(':')[0]) + ":" + str(s_to_hms(t2[1]).split(':')[1]) + ":" + str(s_to_hms(t2[1]).split(':')[2])
#ES: t2 is now same format as t
text3.append(t2)
#END-FUNCTION reformat-text2-as-vtt-list/text3
#del text3[-1]
#ES: text3 is yet another iteration of the snippet version, this time with time formatted as TT ---> TT instead of [TT,TT] like text2
#ES: might need to put table_sentenses through the spinner that transformed text2 into text3
#ES: the following loop could have table_sentenses as input...
#ES: this is the "cumulative" or "concatenation" loop (so there might be something wrong going on here)
#ES: requires that what is being looped is a list in format of text3
"""
print "printing text3"
print "======================="
print text3
"""
#ES: code added as an option to avoid using the resampling method and simply use native Youtube sub style
else:
text3 = ['WEBVTT','Kind: captions','Language: '+str(language),'']
for t in table_sentenses:
for el in t:
if isinstance(el, list) == True:
timestampES = str(s_to_hms(t[0][0]).split(':')[0]) + ":" + str(s_to_hms(t[0][0]).split(':')[1]) + ":" + str(s_to_hms(t[0][0]).split(':')[2]) + " --> " + str(s_to_hms(t[0][1]).split(':')[0]) + ":" + str(s_to_hms(t[0][1]).split(':')[1]) + ":" + str(s_to_hms(t[0][1]).split(':')[2])
text3.append(timestampES)
else:
text3.append(el)
#ES: AT THIS POINT, TEXT3 IS SAME FORMAT AS PREV TEXT3 EXCEPT THAT TIMESTAMPS NATURALLY DON'T OCCUR AT EVERY 2ND ELEMENT
#ES: once the snippet text is scanned and transposed into the list 'text3', each line in text3 is looped through for concatenation.
#print "TEXT3: ",text3
#FUNCTION concatenate_text3
for t in text3:
if "WEBVTT" not in t and "Kind" not in t and "Language" not in t:
#ES: if timestamp
#ES: change this conditional slightly if t is in table_sentenses instead of text3
if t != "" and t[0].isdigit() and t[1] is ':':
#ES: if timestamp is in between first and last timestamp in snippet
"""
print "t", t#timestamp being processed
print "hms_to_s(t)",hms_to_s(t)[0]#the timestamp in seconds (for comparison)
print "last", last#last represents the previous timestamp.
print "offset", offset#offset inherited by last
"""
#ES: when last is at the beginning of a new snippet loop, it is naturally larger than the present timestamp (since it represents the last timestamp from the prev snippet), triggering the following code
if hms_to_s(t)[0] < last[0]:
#ES: in RG's code, offset is equal to last (last is the last timestamp of the prev snippet). However, since these last timestamps are erroneous and go beyond the actual video length due to the resampling of subtitles by RG, i am using the raw Youtube final (un-resampled) timestamps as offsets.
try:
offset = [t_list[t_list_pos],t_list[t_list_pos]]
#print "t_list",t_list
#print "t_list_pos",t_list_pos
#print "offsets used: " + str(offset)
t_list_pos += 1
except IndexError as e:
print e
print "\nvar 't_list' doesn't have any timestamps in it because you are not running the whole pipeline. Program will proceed by using the last timestamp of the previous .vtt file as offset."
offset = last
pass
"""
print "last: ",last
print "offset: ",offset
print "t : "+ str(t)
print "t_list : "+ str(t_list)
print "t_list_pos : "+ str(t_list_pos)
"""
#offset = [last_t22[z-1][0],last_t22[z-1][1]]
#ES: OFFSET is the variable containing the cumulative timestamps
#print 1,offset[1]
#print ""
#ES: when dealing with the first timestamp in a vtt file, make the opening time of this caption 0.0, otherwise concatenation can be problematic, resulting in subtitle lag (i.e. youtube doesn't always set the first caption to minute 0.0 in a video snippet)
t_1 = t.split(" --> ")
t_1[0] = "0:0:0.0"
t_2 = " --> ".join(t_1)
t = t_2
timestamp = hms_to_s(t)
else:
timestamp = hms_to_s(t)
"""
print "timestamp", timestamp#ES: t to number array
"""
#ES: t will be directly in the form taken by timestamp if we use table_sentenses instead of text3
#print 1,t,hms_to_s(t)
new_timestamp = "\n" + s_to_hms(timestamp[0] + offset[1]) + " --> " + s_to_hms(timestamp[1] + offset[1])
"""
print "new timesamp", new_timestamp#ES: new_timestamp adds offset[1] (end-timestamp of last timestamp in prev snippet) to current t
"""
#new_sub.append(new_timestamp)
last = hms_to_s(t)
t = new_timestamp
#print t
t = hms_to_s(t)
"""
print "t new_timestamp", t#ES: new timestamp in s form.
"""
t = [float("{0:.2f}".format(round(t[0],2))),float("{0:.2f}".format(round(t[1],2)))]
"""
print "t rounded", t#ES: removes nanoseconds (worth keeping since these are only very small, rare numbers)
"""
t = s_to_hms(t[0]) + " --> " + s_to_hms(t[1])
"""
print "t final", t
"""
#print t,offset[1]
latest_timestamp = s_to_hms(timestamp[1] + offset[1])#ES: the end-time of the current (new, unrounded) timestamp
#ES: I don't see the point in processing t and latest_timestamp...
#print len(sentence),sentence
#ES: following code is not needed. it doesn't run if place_based_timestamping is False (which it always is)
place = False
#ES: following code is not needed
if len(sentence) > 0:
ee = ""
for e in sentence:
ee += e
countries,cities = GeoText(ee).countries,GeoText(ee).cities
nbr_places = 0
for countr in countries:
if countr not in list_false_positives:
nbr_places += 1
for citi in cities:
if citi not in list_false_positives:
nbr_places += 1
#print cities
if place_based_timestamping == False:
nbr_places = 0
#if len(GeoText(ee).countries) > 0 or len(GeoText(ee).cities) > 0:
if nbr_places > 0 or " là-bas " in ee or " chez " in ee or " vers " in ee:
place = True
#print GeoText(ee).countries,GeoText(ee).cities,"là-bas" in ee , "chez" in ee , "vers" in ee
#print ""
#places.cities
if fil[2] == False:
place = False
#ES: not sure what the following "passes" are about...
if (hms_to_s(t)[0]-last_timestamp > min_timelapse_between_stamps or first_pass == True):
delay_passed = True
first_pass = False
else:
delay_passed = False
#ES ADDED 24/03. removing delaypassed to see what this does..
# it doesn't do any good
#delay_passed = False
if delay_passed == True or place == True:
#ES new_sub.append(new_timestamp)
sub_times.append(new_timestamp)
last_timestamp = hms_to_s(t)[0]
#print "new_timestamp: ", new_timestamp
#print "new_sub.append(new_timestamp): ", new_sub
#print "sub_times.append(new_timestamp): ", sub_times
#print "last_timestamp: ", last_timestamp
#ES: adds interviewer and interviewee codes
if len(sentence) > 0:
ee = ""
for e in sentence:
if interviewer != "":
#print e
if interviewer[:-1] in e and interviewer not in e:
#print e
e = e.replace(interviewer[:-1],interviewer)
#print e
if interviewee != "":
if interviewee[:-1] in e and interviewee not in e:
e = e.replace(interviewee[:-1],interviewee)
#print e
e.replace(" ", "")
e.rstrip('\n')
if ".:" in e and len(ee) > 0:
ee += "\n"
ee += e + " "
#print ""
#print "2. ee : ", ee
if ee != '':
new_sub.append(ee)
sub_text.append(ee)
new_sub.append(new_timestamp)
sentence = []
#print "3. new_sub: ", new_sub
else:
#sentence = []
#print sentence#,t
if len(t) > 0:
sentence.append(t)
#print "4. sentence: t: ", t
#END_FUNCTION concatenate_text3
"""
"""
#print new_sub
#ES: set to false to prevent redistribution of lone words
if removeLoneWords == True:
ee = ""
for e in sentence:
#print e
e.replace(" ", "")
e.rstrip('\n')
if ".:" in e and len(ee) > 0:
ee += "\n"
ee += e + " "
new_sub.append(ee)
sub_text.append(ee)
new_sub2 = ["WEBVTT\nKind: captions\nLanguage: "+str(language)]
c = 0
#ES: 2nd PROCESS... cleans the resampled subs..
for i in range(len(sub_times)):
#print sub_text[i]
for s in sub_text[i].split("\n"):
# If there is a lone word on a line
if len(s.split()) == 1:
sub_text[i-1] = sub_text[i-1] + s
sub_text[i] = sub_text[i].replace(s + "\n",'')
# if there is a lone word, separated by a "." at the beginning or end of a line
#print sub_text[i]
if sub_text[i][-2] != '?' and sub_text[i][-2] != '!' and sub_text[i][-2] != '.' and sub_text[i][-2] != ',' and sub_text[i][-2] != ';' and sub_text[i][-2] != ':':
#print s
for p in s.split("."):
p = p.strip()
if len(p) > 1 and len(p.split()) == 1 and p != ":O":# and p != '[Pause]':
#print p,len(p),p.split(),len(p.split())
c += 1
if p.replace(" ","") in s.split()[0]:
#print s,p,"devant"
sub_text[i-1] = sub_text[i-1] + p.replace(" ","") + "."
sub_text[i] = sub_text[i].replace(p + ". ",'')
elif p.replace(" ","") in s.split()[-1]:
#print s,p,"derrière"
sub_text[i+1] = p.replace(" ","") + " " + sub_text[i+1]
sub_text[i] = sub_text[i].replace(p.replace(" ",""),'')
new_sub2.append(sub_times[i])
new_sub2.append(sub_text[i])
#END-FUNCTION process-transform-aggregate-snippets
#for i in new_sub2:
# print i
new_sub2 = ["WEBVTT\nKind: captions\nLanguage: "+str(language)]
for i in range(len(sub_times)):
new_sub2.append(sub_times[i])
if not sub_text[i].isspace():
new_sub2.append(sub_text[i])
#for i in new_sub2:
# print i
"""
print "=========================="
print "new_sub2"
print new_sub2
"""
new_sub = new_sub2
#print new_sub
sub = ""
sub_srt = ""
sub_txt = ""
sub_txt_debug = ""
all_time_stamps =[]
#print "printing new_sub or text3: ",new_sub
for i in new_sub:
#if "A.M" in i and "A.M.:" not in i:
if i[1].isdigit() and i[2] == ':':
#print i,hms_to_s(i)
all_time_stamps.append(hms_to_s(i))
#all_time_stamps.append([float(hms_to_s(i)[0]),float(hms_to_s(i)[1])])
#ES WRITE EXCEPTION
for i in range(len(all_time_stamps)):
all_time_stamps[i][0] = s_to_hms(all_time_stamps[i][0])
try:
all_time_stamps[i][1] = s_to_hms(all_time_stamps[i+1][0])
except:
all_time_stamps[i][1] = s_to_hms(all_time_stamps[i][1])
all_time_stamps[len(all_time_stamps)-1][1] = latest_timestamp
all_time_stamps2 = []
for i in all_time_stamps:
if len(i[0].split(":")[0]) == 1:
h1 = "0" + i[0].split(":")[0]
else:
h1 = i[0].split(":")[0]
if len(i[0].split(":")[1]) == 1:
m1 = "0" + i[0].split(":")[1]
else:
m1 = i[0].split(":")[1]
if len(i[0].split(":")[2]) == 1:
s1 = "0" + i[0].split(":")[2]
else:
s1 = i[0].split(":")[2]
if len(i[1].split(":")[0]) == 1:
h2 = "0" + i[1].split(":")[0]
else:
h2 = i[1].split(":")[0]
if len(i[1].split(":")[1]) == 1:
m2 = "0" + i[1].split(":")[1]
else:
m2 = i[1].split(":")[1]
if len(i[1].split(":")[2]) == 1:
s2 = "0" + i[1].split(":")[2]
else:
s2 = i[1].split(":")[2]
#ES: add zeros to seconds
if s2[1] == '.':
s2 = '0' + s2
if len(s2) == 4:
s2 = s2 + '00'
if len(s2) == 5:
s2 = s2 + '0'
if s1[1] == '.':
s1 = '0' + s1
if len(s1) == 4:
s1 = s1 + '00'
if len(s1) == 5:
s1 = s1 + '0'
"""
if i[10] == ' ':
i = i[:10] + '0' + i[10:]
if i[9] == ' ':
i = i[:9] + '00' + i[9:]
if len(i) == 26:
i = i + '0'
if len(i) == 25:
i = i + '00'
"""
all_time_stamps2.append([h1 + ":" + m1 + ":" + s1,h2 + ":" + m2 + ":" + s2])
c = 0
for i in new_sub:
#ES: the below replacement was done for berthe's story, since there were many distracting " ... " in the transcript.
i = i.replace('" ... "','[...]')
i = i.replace('::',':')
i = i.replace('‘’','"')
i = i.replace(' ',' ')
if i[1].isdigit() and i[2] == ':':
i = "\n" + all_time_stamps2[c][0] + " --> " + all_time_stamps2[c][1]
c += 1
if i != "" or not i.isspace():
sub += i + "\n"
c = 0
for i in new_sub:
i = i.replace('‘’','"')
i = i.replace(' ',' ')
if i[1].isdigit() and i[2] == ':':
i = "\n" + str(c+1) + "\n" + all_time_stamps2[c][0] + " --> " + all_time_stamps2[c][1]
c += 1
if (i != "" or not i.isspace()) and "WEBVTT" not in i:
sub_srt += i + "\n"
c = 0
for i in new_sub:
i = i.replace('‘’','"')
i = i.replace(' ',' ')
if i[1].isdigit() and i[2] == ':':
#i = "\n" + all_time_stamps2[c][0]
i = "[" + all_time_stamps2[c][0] + "]"
c += 1
if (i != "" or not i.isspace()) and "WEBVTT" not in i:
if interviewer != "":
i = i.replace(interviewer + ":", interviewer + "::")
if interviewee != "":
i = i.replace(interviewee + ":", interviewee + "::")
i = i.replace("\n"," ")
sub_txt += i + " "
c = 0
for i in new_sub:
i = i.replace('‘’','"')
i = i.replace(' ',' ')
if i[1].isdigit() and i[2] == ':':
i = "\n" + all_time_stamps2[c][0]
#i = "[" + all_time_stamps2[c][0] + "]"
c += 1
if (i != "" or not i.isspace()) and "WEBVTT" not in i:
if interviewer != "":
i = i.replace(interviewer + ":", interviewer + "::")
if interviewee != "":
i = i.replace(interviewee + ":", interviewee + "::")
sub_txt_debug += i + "\n"
if fil[1] <= 10:
print "\nprinting final subtitles to files...\n"
try:
os.mkdir(folderName + "/output")
except Exception as e:
print "Output folder already exists. \nOverwriting..."
with open(folderName + "/output/" + fileName + "_" + language + ".vtt", 'w') as thefile:
thefile.write(sub)
with open(folderName + "/output/" + fileName + "_" + language + ".srt", 'w') as thefile:
thefile.write(sub_srt)
with open(folderName + "/output/" + fileName + fil[0] + "_" + language + ".txt", 'w') as thefile:
thefile.write(sub_txt)
with open(folderName + "/output/" + fileName + fil[0] + "_" + language + "_readable" + ".txt", 'w') as thefile:
thefile.write(sub_txt_debug)
try:
os.remove(folderName + "/" + "delete me.txt")
except:
pass
print "Compiled subtitle files are now available in '" + folderName + "/output' folder."
return sub_srt
"""
except Exception as ex:
print "an error occurred"
print ex
logging.exception('full stack trace:')
pass
"""
"""
interviewer = "S.G."
interviewee = "O.G."
folderName = 'Oscar'
fileName = 'Oscar_complete'
originalVideo = "Oscar.mp4"
compiledSubs = compileSubs(folderName,fileName,[['_High_freq_timestamping',0,False]],interviewer,interviewee,True)
"""
|
""" Implementation of Cosmic RIM estimator"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
physical_devices = tf.config.experimental.list_physical_devices('GPU')
print(physical_devices)
assert len(physical_devices) > 0, "Not enough GPU hardware devices available"
config = tf.config.experimental.set_memory_growth(physical_devices[0], True)
import tensorflow_probability as tfp
import numpy as np
import os, sys, argparse, time
from scipy.interpolate import InterpolatedUnivariateSpline as iuspline
from scipy.interpolate import interp1d
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import flowpm
from flowpm import linear_field, lpt_init, nbody, cic_paint
from flowpm.utils import r2c3d, c2r3d
sys.path.append('../../utils/')
import tools
from getbiasparams import getbias
import diagnostics as dg
class HaloData():
def __init__(self, args):
self.args = args
@tf.function
def pmpos(self, linear):
args = self.args
if args.nbody:
print('Nbody sim')
state = lpt_init(linear, a0=args.a0, order=args.lpt_order)
final_state = nbody(state, args.stages, args.nc)
else:
print('ZA/2LPT sim')
final_state = lpt_init(linear, a0=args.af, order=args.lpt_order)
tfinal_field = cic_paint(tf.zeros_like(linear), final_state[0])
return tfinal_field, final_state[0]
@tf.function
def biasfield(self, linear, bias):
args = self.args
b1, b2 = bias[0], bias[1]
final_field, fpos = self.pmpos(linear)
w0 = tf.reshape(linear, (linear.shape[0], -1))
w0 = w0 - tf.expand_dims(tf.reduce_mean(w0, 1), -1)
w2 = w0*w0
w2 = w2 - tf.expand_dims(tf.reduce_mean(w2, 1), -1)
weight = b1*w0 + b2*w2
bmodel = cic_paint(tf.zeros_like(linear), fpos, weight = weight)
return bmodel
@tf.function(
# input_signature=[tf.TensorSpec(shape=[1, nc, nc, nc], dtype=tf.float32),
# tf.TensorSpec(shape=[1, nc, nc, nc], dtype=tf.float32),
# tf.TensorSpec(shape=[2], dtype=tf.float32),
# tf.TensorSpec(shape=[nc, nc, nc], dtype=tf.float32) ]
)
def recon(self, linear, data, bias, errormesh):
args = self.args
bs, nc = args.bs, args.nc
kmesh = args.kmesh
priorwt = args.priorwt
linear = tf.reshape(linear, data.shape)
bmodel = self.biasfield(linear, bias)
residual = bmodel - data
base = residual
resk = r2c3d(base, norm=nc**3)
reskmesh = tf.square(tf.cast(tf.abs(resk), tf.float32))
chisq = tf.reduce_sum(tf.multiply(reskmesh, 1/errormesh))
#chisq = tf.reduce_mean(tf.multiply(reskmesh, 1/errormesh))
#chisq = chisq * bs**3/nc**1.5
if args.prior:
lineark = r2c3d(linear, norm=nc**3)
priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
prior = tf.reduce_mean(tf.multiply(priormesh, 1/priorwt))
prior = prior * bs**3/nc**1.5
else:
print('\nRIM does not use prior\n')
prior = 0.
loss = chisq + prior
return loss, chisq, prior
##
## bmodel = self.biasfield(linear, bias)
## residual = bmodel - data
## resk = r2c3d(residual, norm=args.nc**3)
## reskmesh = tf.square(tf.cast(tf.abs(resk), tf.float32))
## chisq = tf.reduce_sum(tf.multiply(reskmesh, 1/errormesh))
## chisq = chisq*bs**3 /nc**1.5
##
## if args.prior:
## lineark = r2c3d(linear, norm=args.nc**3)
## priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
## prior = tf.reduce_mean(tf.multiply(priormesh, 1/args.priorwt))
## prior = prior*bs**3 / nc**1.5
## else: prior = 0.
## loss = chisq + prior
##
## return loss, chisq, prior
## #return loss*nc**3, chisq*nc**3, prior*nc**3
##
##
@tf.function
def recon_grad(self, x, y, bias, errormesh):
with tf.GradientTape() as tape:
tape.watch(x)
loss = self.recon(x, y, bias, errormesh)[0]
grad = tape.gradient(loss, x)
return grad
@tf.function
def reconstruct_loss(self, linear, data, bias, errormesh, Rsm=tf.constant(0.), useprior=True):
"""
"""
args = self.args
bs, nc = args.bs, args.nc
kmesh = args.kmesh
priorwt = args.priorwt
linear = tf.reshape(linear, data.shape)
bmodel = self.biasfield(linear, bias)
residual = bmodel - data
base = residual
print("\nAdd annealing section to graph\n")
Rsmsq = tf.multiply(Rsm*bs/nc, Rsm*bs/nc)
smwts = tf.exp(tf.multiply(-kmesh**2, Rsmsq))
basek = r2c3d(base, norm=nc**3)
basek = tf.multiply(basek, tf.cast(smwts, tf.complex64))
base = c2r3d(basek, norm=nc**3)
resk = r2c3d(base, norm=nc**3)
reskmesh = tf.square(tf.cast(tf.abs(resk), tf.float32))
chisq = tf.reduce_mean(tf.multiply(reskmesh, 1/errormesh))
chisq = chisq * bs**3/nc**1.5
if useprior:
lineark = r2c3d(linear, norm=nc**3)
priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
prior = tf.reduce_mean(tf.multiply(priormesh, 1/priorwt))
prior = prior * bs**3/nc**1.5
else: prior = 0.
loss = chisq + prior
return loss #, chisq, prior
## residual = bmodel - data
## resk = r2c3d(residual, norm=args.nc**3)
##
## print('\nAdd annealing graph\n')
## Rsmsq = tf.multiply(Rsm*bs/nc, Rsm*bs/nc)
## smwts = tf.exp(tf.multiply(-args.kmesh**2, Rsmsq))
## resk = tf.multiply(resk, tf.cast(smwts, tf.complex64))
##
## reskmesh = tf.square(tf.cast(tf.abs(resk), tf.float32))
## logprob = tf.reduce_mean(tf.multiply(reskmesh, 1/errormesh))
## logprob = logprob *bs**3/nc**1.5
##
## #Prior
## if useprior:
## lineark = r2c3d(linear, norm=nc**3)
## priormesh = tf.square(tf.cast(tf.abs(lineark), tf.float32))
## prior = tf.reduce_mean(tf.multiply(priormesh, 1/args.priorwt))
## prior = tf.multiply(prior, bs**3 / nc**1.5, name='prior')
## else: prior = 0.
## loss = logprob + prior
##
## return loss
##
def reconstruct(self, data, bias, errormesh, RRs=[1.0, 0.0], niter=100, lr=0.1, x_init=None, useprior=True):
print('reconstructing')
args = self.args
bs, nc = args.bs, args.nc
@tf.function
def grad(x, Rsm):
with tf.GradientTape() as tape:
tape.watch(x)
loss = self.reconstruct_loss(x, data, bias, errormesh, Rsm, useprior=useprior)
grad = tape.gradient(loss, x)
return grad
# Create an optimizer for Adam.
opt = tf.keras.optimizers.Adam(learning_rate=lr)
##Reconstruction
if x_init is None:
x_init = np.random.normal(0, 1, nc**3).reshape(data.shape).astype(np.float32)
linear = tf.Variable(name='linmesh', shape=data.shape, dtype=tf.float32,
initial_value=x_init, trainable=True)
for iR, RR in enumerate(RRs):
print('For smoothing scale : ', RR)
for i in range(niter):
grads = grad([linear], tf.constant(RR, dtype=tf.float32))
opt.apply_gradients(zip(grads, [linear]))
minic = tf.reshape(linear, data.shape)
#
print('\nminimized\n')
minsample = self.biasfield(minic, bias) #tf.constant(minic, dtype=tf.float32), bias).numpy()
return minic, minsample
def setupbias(self, traindata, nsims = 10, cutoff=1.5):
args = self.args
bs, nc = args.bs, args.nc
b1, b2, perr = [], [], []
for i in range(nsims):
idx = np.random.randint(0, traindata.shape[0], 1)
xx = traindata[idx, 0].astype(np.float32)
yy = traindata[idx, 1].astype(np.float32)
_, fpos = self.pmpos(tf.constant(xx))
fpos = fpos[0].numpy() *bs/nc
bparams, bmodel = getbias(bs, nc, yy[0]+1, xx[0], fpos)
bmodeltf = self.biasfield(xx, tf.constant([bparams[0], bparams[1]], dtype=tf.float32)).numpy()
errormesh = yy - bmodeltf# np.expand_dims(bmodel, 0)
kerror, perror = tools.power(errormesh[0]+1, boxsize=bs)
kerror, perror = kerror[1:], perror[1:]
perr += [perror]
b1 += [bparams[0]]
b2 += [bparams[1]]
print("b1 : %0.3f $\pm$ %0.2f"%(np.array(b1).mean(), np.array(b1).std()))
print("b2 : : %0.3f $\pm$ %0.2f"%(np.array(b2).mean(), np.array(b2).std()))
b1, b2 = np.array(b1).mean(), np.array(b2).mean()
perr = np.array(perr).mean(axis=0)
kny = nc*np.pi/bs
cutoff = 1.5
perr[np.where(kerror > cutoff*kny)] = perr[np.where(kerror > cutoff*kny)[0][0]]
ipkerror = interp1d(kerror, perr, bounds_error=False, fill_value=(perr[0], perr.max()))
errormesh = tf.expand_dims(tf.constant(ipkerror(args.kmesh), dtype=tf.float32), 0)
#ipkerror = lambda x: 10**np.interp(np.log10(x), np.log10(kerror), np.log10(perr))
#errormesh = tf.constant(ipkerror(args.kmesh), dtype=tf.float32)
bias = tf.constant([b1, b2], dtype=tf.float32)
return bias, errormesh
#
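# A minimal wiring sketch (hypothetical values): the HaloData methods above expect
# `args` to provide at least nc, bs, kmesh, priorwt, nbody, a0, af, lpt_order,
# stages and prior (collected from their usage in the class).
#
#   args = argparse.Namespace(nc=64, bs=200., nbody=False, a0=0.1, af=1.0,
#                             lpt_order=2, stages=None, prior=True,
#                             kmesh=..., priorwt=...)
#   datamodel = HaloData(args)
#   bias, errormesh = datamodel.setupbias(traindata, nsims=10)
#   minic, minsample = datamodel.reconstruct(data, bias, errormesh, RRs=[1., 0.], niter=100)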
def get_data(args, testfrac=0.9):
bs, nc = args.bs, args.nc
nsims = args.nsims
numd = args.numd
try: R=args.Rstd
except: R=128
path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
#path = '/project/projectdirs/m3058/chmodi/rim-data/halos/z00/'
path = path + '/L%04d_N%04d_D%04d//'%(bs, nc, numd*1e4)
alldata = np.array([np.load(path + 'S%04d.npy'%i) for i in range(100, 100+nsims)]).astype(np.float32)
if args.posdata: traindata, testdata = alldata[:int(testfrac*nsims), [0,1]], alldata[int(testfrac*nsims):, [0,1]]
else: traindata, testdata = alldata[:int(testfrac*nsims), [0,2]], alldata[int(testfrac*nsims):, [0,2]]
if args.stdinit:
initdata = np.array([np.load(path + 'stdR%d_S%04d.npy'%(R,i)) for i in range(100, 100+nsims)]).astype(np.float32)
traindatainit, testdatainit = initdata[:int(testfrac*nsims)], initdata[int(testfrac*nsims):]
traindata = np.concatenate([traindata, np.expand_dims(traindatainit, 1)], axis=1)
testdata = np.concatenate([testdata, np.expand_dims(testdatainit, 1)], axis=1)
return traindata, testdata
def get_diff_spectra(args, ipklin, nsims=10, nsteps=3):
bs, nc = args.bs, args.nc
nsims = args.nsims
numd = args.numd
try: R=args.Rstd
except: R=128
ncf=args.ncf
path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
dpath = path + '/L%04d_N%04d_D%04d//'%(bs, nc, numd*1e4)
alldata = np.array([np.load(dpath + 'S%04d.npy'%i) for i in range(100, 100+nsims)]).astype(np.float32)
initdata = np.array([np.load(dpath + 'stdR%d_S%04d.npy'%(R,i)) for i in range(100, 100+nsims)]).astype(np.float32)
try:
dyn = "%02dstep"%nsteps
path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
path = path + '/L%04d_N%04d_%s//'%(bs, nc, dyn)
final = np.array([tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/d/'%(bs, nc, seed, nsteps)) for seed in range(100, 100+nsims)]).astype(np.float32)
except:
dyn = "%02dstep_B1"%nsteps
path = '//mnt/ceph/users/cmodi/cosmo4d/z00/'
path = path + '/L%04d_N%04d_%s//'%(bs, nc, dyn)
final = np.array([tools.readbigfile(path + '/L%04d_N%04d_S%04d_%02dstep/mesh/d/'%(bs, nc, seed, nsteps)) for seed in range(100, 100+nsims)]).astype(np.float32)
print('alldata shape :', alldata.shape)
pdiffs, bb = [], []
for j in range(nsims):
k, pfin = tools.power(final[j], boxsize=bs)
ph = tools.power(1+alldata[j, 1], boxsize=bs)[1]
bias = ((ph[1:5]/pfin[1:5])**0.5).mean()
bb.append(bias)
recon = initdata[j] / bias
precon =tools.power(1+recon, boxsize=bs)[1]
pdiff = ipklin(k) - precon
pdiffs.append(pdiff)
pdiff = np.array(pdiffs).mean(axis=0)
bias = np.array(bb).mean(axis=0)
xx, yy = k[pdiff > 0], pdiff[pdiff > 0]
ipkdiff = lambda x: 10**np.interp(np.log10(x), np.log10(xx), np.log10(yy))
return ipkdiff, bias
def get_ps(bs, iterand, truth):
ic, fin = truth
ic1, fin1 = iterand
pks = []
#if abs(ic1[0].mean()) < 1e-3: ic1[0] += 1
#if abs(ic[0].mean()) < 1e-3: ic[0] += 1
k, p1 = tools.power(ic1[0], boxsize=bs)
k, p2 = tools.power(ic[0], boxsize=bs)
k, p12 = tools.power(ic1[0], f2=ic[0], boxsize=bs)
pks.append([p1, p2, p12])
#if fin1[0].mean() < 1e-3: fin1[0] += 1
#if fin[0].mean() < 1e-3: fin[0] += 1
k, p1 = tools.power(fin1[0], boxsize=bs)
k, p2 = tools.power(fin[0], boxsize=bs)
k, p12 = tools.power(fin1[0], f2=fin[0], boxsize=bs)
pks.append([p1, p2, p12])
return k, pks
def check_im(xx, x_init, pred, fname=None):
fig, ax = plt.subplots(1, 3, figsize = (12, 4))
vmin, vmax = xx.sum(axis=0).min(), xx.sum(axis=0).max()
ax[0].imshow(xx.sum(axis=0), vmin=vmin, vmax=vmax)
ax[0].set_title('Truth')
ax[1].imshow(x_init.sum(axis=0), vmin=vmin, vmax=vmax)
ax[1].set_title('initial point')
ax[2].imshow(pred.sum(axis=0), vmin=vmin, vmax=vmax)
ax[2].set_title('RIM recon')
if fname is not None: plt.savefig(fname)
else: plt.savefig(ofolder + 'rim-im.png')
plt.close()
def check_2pt(datamodel, rim_test, fid_test, grad_params, fname):
nc, bs = datamodel.args.nc, datamodel.args.bs
fig, axar = plt.subplots(2, 2, figsize=(9, 9))
a, b = rim_test
xx, yy = a
x_init, pred = b
k, pks = get_ps(bs, [x_init.numpy(), datamodel.biasfield(x_init, grad_params[0]).numpy()], [xx.numpy(), yy.numpy()])
for i in range(2):
ax = axar[i]
ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C0:', lw=1, label='Init')
ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C0:', lw=1)
k, pks = get_ps(bs, [pred.numpy(), datamodel.biasfield(pred, grad_params[0]).numpy()], [xx.numpy(), yy.numpy()])
for i in range(2):
ax= axar[i]
ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d'%1, label='RIM')
ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d'%1)
#
a, b = fid_test
xx, yy = a
pred_adam, pred_adam10, minic = b
k, pks = get_ps(bs, [pred_adam.numpy(), datamodel.biasfield(pred_adam, grad_params[0]).numpy()], [xx.numpy(), yy.numpy()])
for i in range(2):
ax = axar[i]
ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d:'%2, lw=1, label='Adam')
ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d:'%2, lw=0.5)
k, pks = get_ps(bs, [pred_adam10.numpy(), datamodel.biasfield(pred_adam10, grad_params[0]).numpy()], [xx.numpy(), yy.numpy()])
for i in range(2):
ax = axar[i]
ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d'%2, label='Adam x10')
ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d'%2)
k, pks = get_ps(bs, [minic.numpy(), datamodel.biasfield(minic, grad_params[0]).numpy()], [xx.numpy(), yy.numpy()])
for i in range(2):
ax = axar[i]
ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d-'%3, label='Annealed')
ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d-'%3)
for axis in axar.flatten():
axis.legend()
axis.semilogx()
axis.grid(which='both')
for ax in axar:
ax[0].set_ylim(-0.1, 1.2)
ax[1].set_ylim(-0.5, 2.5)
plt.savefig(fname)
plt.close()
##def check_2pt(xx, yy, rim, grad_fn, grad_params, compares, nrim=10, fname=None):
## truemesh = [xx[0], yy[0]]
## rimpreds = []
## for it in range(nrim):
## x_init = np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
## #x_init = (yy - (yy.max() - yy.min())/2.)/yy.std() + np.random.normal(size=xx.size).reshape(xx.shape).astype(np.float32)
## pred = rim(tf.constant(x_init), tf.constant(yy), grad_fn, grad_params)[-1]
## rimpreds.append([pred[0].numpy(), biasfield(pred, grad_params[0])[0].numpy()])
##
## fig, ax = plt.subplots(1, 2, figsize=(9, 4), sharex=True)
## for ip, preds in enumerate(rimpreds):
## k, pks = tools.get_ps(preds, truemesh, bs)
## for i in range(2):
## lbl = None
## if ip == 0 and i == 0: lbl = 'Linear'
## if ip == 0 and i == 1: lbl = 'Final'
## ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d-'%i, alpha=0.4, label=lbl)
## ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d-'%i, alpha=0.4)
##
## lss = ['-', '--', ':', '-.']
## lws = [ 1, 1, 2, 2]
## lbls = ['Adam', 'Adam 10x', 'Best recon']
## #for ip, preds in enumerate([pred_adam, pred_adam10]):
## for ip, preds in enumerate(compares):
## k, pks = tools.get_ps(preds, truemesh, bs)
## for i in range(2):
## lbl = None
## if i == 0: lbl = lbls[ip]
## ax[0].plot(k, pks[i][2]/(pks[i][0]*pks[i][1])**0.5, 'C%d'%i, ls=lss[ip+1], lw=lws[ip+1])
## ax[1].plot(k, (pks[i][0]/pks[i][1])**0.5, 'C%d'%i, label=lbl, ls=lss[ip+1], lw=lws[ip+1])
##
## for axis in ax:
## axis.semilogx()
## axis.grid(which='both')
## axis.legend(fontsize=12)
## axis.set_xlabel('k(h/Mpc)', fontsize=12)
## ax[0].set_ylim(-0.1, 1.2)
## ax[1].set_ylim(-0.5, 2.0)
## ax[0].set_ylabel('$r_c$', fontsize=12)
## ax[1].set_ylabel('$t_f$', fontsize=12)
## plt.tight_layout()
## if fname is not None: plt.savefig(fname)
## else: plt.savefig('rim-2pt.png')
## plt.close()
##
##
|
def center_align(text, max_length):
to_add = max_length - len(text)
left = ' ' * (to_add // 2)
right = ' ' * (to_add - to_add // 2)
return ''.join([left, text, right])
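# Example: center_align("hi", 6) -> "  hi  "; for an odd amount of padding the extra
# space goes to the right, so the result is always exactly max_length characters wide.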
|
'''
Let's plot Brady's theory for our variables...
'''
import sys
import os
import numpy as np
import math
import matplotlib.pyplot as plt
def computeActivity(VP, TAUR, SIG):
return (3.0 * VP * TAUR / SIG)
tau_R = 1.0 / 3.0
sigma = 1.0
particle_Area = np.pi * ((sigma / 2.0)**2)
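# Quick check of the activity definition above: with tau_R = 1/3 and sigma = 1,
# a swim speed of 1 gives computeActivity(1.0, tau_R, sigma) = 3*1*(1/3)/1 = 1.0,
# i.e. Pe equals the swim speed in these units.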
def computeBradyMono(PHI, PE, APART, PHICP):
'''Computes pressure, straight from the Brady paper'''
ndense = APART / PHI
return ndense * ((PE**2)/6.0) * ( 1 - PHI - PHI**2 + PHI*(9.0/(2.0 * PE)) * (PHICP/(1-PHI)) )
def compute2DMonoSpinodal(PHI, PHICP):
top = (3 * 0.2 * (PHI**2)) + (2 * PHI) - 1
term = 1.0 - (PHI/PHICP)
bottom1 = 2.0 * PHI * ((term)**-1)
bottom2 = ((PHI**2)/PHICP) * ((term)**-2)
return top / ((4.0 / np.pi) * (bottom1 + bottom2))
def compute3DMonoSpinodal(PHI, PHICP):
top = (3 * (PHI**2)) + (2 * PHI) - 1
term = 1.0 - (PHI/PHICP)
bottom1 = 2.0 * PHI * ((term)**-1)
bottom2 = ((PHI**2)/PHICP) * ((term)**-2)
return top / (3.0 * (bottom1 + bottom2))
inPhis = np.arange(0.2, 0.9, 0.001)
PeRs2D = np.zeros_like(inPhis)
PeRs3D = np.zeros_like(inPhis)
Pes2D = np.zeros_like(inPhis)
Pes3D = np.zeros_like(inPhis)
#phi02D = np.pi / (2.0 * np.sqrt(3.0))
phi02D = 0.9 # this is what Brady uses
phi03D = 0.64 # this is what Brady uses
for i in range(len(inPhis)):
PeRs2D[i] = compute2DMonoSpinodal(inPhis[i], phi02D)
PeRs3D[i] = compute3DMonoSpinodal(inPhis[i], phi03D)
# This fixes issues with plotting against conventional Pe
if inPhis[i] <= 0.444:
Pes2D[i] = 1000
else:
Pes2D[i] = (PeRs2D[i])**-1 * (3.0/2.0)
if inPhis[i] <= 0.336 or inPhis[i] >= 0.6:
Pes3D[i] = 1000
else:
Pes3D[i] = (PeRs3D[i])**-1 * (3.0/2.0)
# Fix PeR weirdness
if inPhis[i] >= 0.62:
PeRs3D[i] = 10**-4
# Plotting individually:
#plt.semilogy(inPhis, PeRs2D)
#plt.xlim(0.2, 0.9)
#plt.ylim(2*10**-3, 4*10**-1)
#plt.xlabel(r'$\phi$')
#plt.ylabel(r'$Pe_{R}$')
#plt.title(r'2D Spinodal')
#plt.show()
#
#plt.plot(inPhis, Pes2D)
#plt.xlim(0.2, 0.9)
#plt.ylim(0, 500)
#plt.xlabel(r'$\phi$')
#plt.ylabel(r'$Pe$')
#plt.title(r'2D Spinodal')
#plt.show()
#
#plt.plot(inPhis, Pes3D)
#plt.xlim(0.2, 0.7)
#plt.ylim(0, 500)
#plt.xlabel(r'$\phi$')
#plt.ylabel(r'$Pe$')
#plt.title(r'3D Spinodal')
#plt.show()
## Make a figure
#fig, ax = plt.subplots(1, 4, figsize=(14,4))
#ax[0].semilogy(inPhis, PeRs2D)
#ax[1].semilogy(inPhis, PeRs3D)
#ax[2].plot(inPhis, Pes2D)
#ax[3].plot(inPhis, Pes3D)
## Limits
#ax[0].set_xlim(0.2, 0.9)
#ax[1].set_xlim(0.2, 0.9)
#ax[2].set_xlim(0.2, 0.9)
#ax[3].set_xlim(0.2, 0.9)
#ax[0].set_ylim(10**-3, 10**-1)
#ax[1].set_ylim(10**-3, 10**-1)
#ax[2].set_ylim(0, 500)
#ax[3].set_ylim(0, 500)
## Labels
#ax[0].set_title(r'2D')
#ax[1].set_title(r'3D')
#ax[2].set_title(r'2D')
#ax[3].set_title(r'3D')
#ax[0].set_ylabel(r'$Pe_{R}$')
#ax[2].set_ylabel(r'$Pe$')
#ax[0].set_xlabel(r'$\phi$')
#ax[1].set_xlabel(r'$\phi$')
#ax[2].set_xlabel(r'$\phi$')
#ax[3].set_xlabel(r'$\phi$')
#ax[0].text(0.75, 1.1, 'Spinodal (vs. $Pe_{R}$)', size=16, transform=ax[0].transAxes)
#ax[2].text(0.75, 1.1, 'Spinodal (vs. $Pe$)', size=16, transform=ax[2].transAxes)
## Ticks
#ax[1].set_yticklabels([])
#ax[3].set_yticklabels([])
#for i in xrange(4):
# ax[i].tick_params(direction='in', which='both')
#plt.tight_layout(pad=2.0, w_pad=0.5, h_pad=1.0)
#plt.savefig('Brady_spinodals.png', dpi=1000)
#plt.close()
# We need to do this with symbolic python
import sympy as sp
# 2D Parameters
beta = 4.0 / np.pi
xi = 0.2
phi0 = 0.90
# 3D Parameters (note: these overwrite the 2D values above, so the implicit plot
# below uses the 3D spinodal; comment this block out to use the 2D parameters)
beta = 3.0
xi = 1.0
phi0 = 0.64
PHI, PER = sp.symbols("PHI PER")
# Each term gets its own line
sp.plot_implicit(sp.Eq(
((PHI / (1.0 - PHI)) *
((1.0/PHI) +
(((beta * PER * phi0) / phi0) * ((1.0-(PHI/phi0))**-1)) +
(3.0*xi*(PHI**2)) -
(PHI) +
(((beta * PER * phi0) / phi0) * (1.0 - phi0) * ((1.0-(PHI/phi0))**-2)) -
(3.0) +
((beta * PER * phi0)))) -
(1.0) +
(2.0*PHI) +
(xi * (PHI**2)) -
(((beta * PER * phi0) / phi0) *
((2.0*PHI)*((1.0-(PHI/phi0))**-1)) +
(((PHI**2)/phi0)*((1.0-(PHI/phi0))**-2)))),
(PHI, 0.2, 0.7),
(PER, 10**-3, 10**1),
yscale='log')
# Modify this plot with the following link
# https://stackoverflow.com/questions/40747474/sympy-and-plotting
|
from pathlib import Path
import numpy as np
import torch
import soundfile as sf
# load file list text file
# comments are escaped using '#'
# file names should be relative to path of the file list
# in case each example corresponds to multiple files, this can be handled by
# omitting extensions in the file list or using only the 'root' part of the file names,
# and let the Dataset load the corresponding files with different extensions and/or suffixes
def load_file_list(fn_filelist_txt):
fn_filelist_txt = Path(fn_filelist_txt)
root_dir = fn_filelist_txt.parent
examples = []
with open(fn_filelist_txt, 'r') as f:
for ln in f:
# handle comment
b_comment = ln.find('#')
if b_comment >= 0:
ln = ln[:b_comment]
# strip white space
ln = ln.strip()
# skip empty lines (e.g. entire line is comment)
if len(ln) == 0:
continue
# add example to list
example = str(root_dir / ln)
examples.append(example)
return examples
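# Example of the expected file-list format (hypothetical contents of a
# 'train_filelist.txt'; paths are resolved relative to the list's directory and
# anything after '#' is stripped):
#
#   # training utterances
#   speaker01/utt_0001
#   speaker01/utt_0002  # trailing comments are fine too
#   speaker02/utt_0001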
class WavDataset(torch.utils.data.Dataset):
# file_list: list of full filenames without extension
def __init__(self, file_list, seq_dur, sr, on_too_short='raise'):
super().__init__()
self.file_list = file_list
self.seq_len = int(np.rint(seq_dur*sr))
self.sr = sr
self.on_too_short = on_too_short
def __getitem__(self, index):
fn_base = self.file_list[index]
fn_wav = '{}.wav'.format(fn_base)
# XXX: here, we read the entire file from disk and then do random crop
# alternatively we could use pysoundfile.read()'s start/stop arguments
# we assume that all files are relatively short, and reading the entire file
# probably makes disk i/o caching easier for the OS
# read file
x, sr = sf.read(fn_wav, always_2d=True, dtype='float32')
n = len(x)
x = x.T # TC -> CT
sr = float(sr)
assert sr == self.sr
# random crop
if n < self.seq_len:
if self.on_too_short == 'raise':
# XXX: if the file is shorter than the requested sequence length, just raise an exception,
# assuming short files have already been filtered from the file list when preprocessing the dataset
# alternatively, short files could be zero padded, but this may have some (minor) side effects (affecting loss, gradients, etc.)
raise IOError('file too short for requested training sequence length; pre-filter file list')
elif self.on_too_short == 'pad':
# XXX: maybe in different cases padding should be left/right/centered
raise NotImplementedError('file too short for requested training sequence length; implement padding')
else:
raise ValueError('invalid on_too_short')
else:
b = np.random.randint(0, n - self.seq_len + 1) # [lo, hi[
e = b + self.seq_len
x = x[:, b:e]
# to pytorch tensor
x = torch.from_numpy(x)
return x
def __len__(self):
return len(self.file_list)
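# A minimal usage sketch (hypothetical paths and parameters, not part of this module):
#
#   files = load_file_list('data/train_filelist.txt')
#   dataset = WavDataset(files, seq_dur=1.0, sr=16000, on_too_short='raise')
#   loader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2)
#   for x in loader:
#       # x has shape (batch, channels, seq_len), with seq_len = int(round(seq_dur * sr))
#       pass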
|
from common.database_api import DatabaseApi
from common.loggable_api import Loggable
from common.message_broker_api import MessageBrokerApi
class BackendTasksApi(Loggable):
"""
This is an abstract class.
It contains logic common to any backend tasks class.
"""
def __init__(self, service_name, descendant_class_name):
"""
Raises RuntimeError.
Arguments:
- service_name -- str.
- descendant_class_name -- str.
"""
super().__init__(service_name, descendant_class_name)
self._database = DatabaseApi(service_name)
self._message_broker = MessageBrokerApi(service_name)
def consumeMessage(self, callback_function):
"""
Returns void.
Raises RuntimeError.
Arguments:
- callback_function -- Function -- Execution steps upon receiving a message from message broker.
"""
try:
self._debug("consumeMessage", "Start")
self._message_broker.subscribe(callback_function)
self._debug("consumeMessage", "Finish")
except Exception as err:
err_msg = "%s -- consumeMessage -- Failed\n%s" % (self._class_name, str(err))
raise RuntimeError(err_msg)
def createTestDocument(self):
"""
Returns void.
Raises RuntimeError.
"""
try:
self._debug("createTestDocument", "Start")
self._database.createTestDocument({"field_a": "value_a"})
self._debug("createTestDocument", "Finish")
except RuntimeError as err:
err_msg = "%s -- createTestDocument -- Failed\n%s" % (self._class_name, str(err))
raise RuntimeError(err_msg)
def deleteTestDocument(self):
"""
Returns void.
Raises RuntimeError.
"""
try:
self._debug("deleteTestDocument", "Start")
self._database.deleteTestDocumentBy({"field_a": "value_a"})
self._debug("deleteTestDocument", "Finish")
except RuntimeError as err:
err_msg = "%s -- deleteTestDocument -- Failed\n%s" % (self._class_name, str(err))
raise RuntimeError(err_msg)
def publishTestMessage(self):
"""
Returns void.
Raises RuntimeError.
"""
try:
self._debug("publishTestMessage", "Start")
self._message_broker.publish("Test message")
self._debug("publishTestMessage", "Finish")
except Exception as err:
err_msg = "%s -- publishTestMessage -- Failed\n%s" % (self._class_name, str(err))
raise RuntimeError(err_msg)
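# Minimal sketch of a descendant class (hypothetical, not part of the source);
# the constructor simply forwards the service name and its own class name:
#
# class CrawlerBackendTasks(BackendTasksApi):
#     def __init__(self):
#         super().__init__("crawler", CrawlerBackendTasks.__name__)
#
# tasks = CrawlerBackendTasks()
# tasks.publishTestMessage()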
|
from numpy.core.numeric import normalize_axis_tuple
import torch
import custom_transforms
from path import Path
class DataLoaderCreator():
def __init__(self, args):
self.args = args
def create(self, mode, seq_length=1):
if self.args.dataset_format == 'stacked':
from datasets.stacked_sequence_folders import SequenceFolder
elif self.args.dataset_format == 'sequential':
from datasets.sequence_folders import SequenceFolder
if mode == 'train':
# Data loading code
train_transform = custom_transforms.Compose([
custom_transforms.RandomHorizontalFlip(),
# custom_transforms.RandomScaleCrop(),
custom_transforms.ArrayToTensor(),
custom_transforms.Normalize(mean=[0.5, 0.5, 0.5],
std=[0.5, 0.5, 0.5])
])
print("=> fetching scenes in '{}'".format(self.args.dataset_dir))
train_set = SequenceFolder(
self.args.dataset_dir,
transform=train_transform,
seed=self.args.seed,
train=True,
sequence_length=self.args.sequence_length
)
print('{} samples found in {} train scenes'.format(len(train_set), len(train_set.scenes)))
train_loader = torch.utils.data.DataLoader(
train_set, batch_size=self.args.batch_size, shuffle=not(self.args.no_shuffle),
num_workers=self.args.workers, pin_memory=True)
return train_loader
elif mode == "validate":
# Data loading code
normalize = custom_transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
valid_transform = custom_transforms.Compose([custom_transforms.ArrayToTensor(), normalize])
            # if no ground truth is available, the validation set is the same type as the training set to measure photometric loss from warping
if self.args.with_gt:
if self.args.with_pose:
from datasets.validation_folders import ValidationSetWithPose
val_set = ValidationSetWithPose(
self.args.dataset_dir,
sequence_length=self.args.sequence_length,
transform=valid_transform)
else:
from datasets.validation_folders import ValidationSet
val_set = ValidationSet(
self.args.dataset_dir,
transform=valid_transform
)
else:
val_set = SequenceFolder(
self.args.dataset_dir,
transform=valid_transform,
seed=self.args.seed,
train=False,
sequence_length=self.args.sequence_length,
)
print('{} samples found in {} valid scenes'.format(len(val_set), len(val_set.scenes)))
val_loader = torch.utils.data.DataLoader(
val_set, batch_size=self.args.batch_size, shuffle=False,
num_workers=self.args.workers, pin_memory=True)
return val_loader
elif mode == 'test_dispnet':
if self.args.gt_type == 'KITTI':
from kitti_eval.depth_evaluation_utils import test_framework_KITTI as test_framework
elif self.args.gt_type == 'stillbox':
from stillbox_eval.depth_evaluation_utils import test_framework_stillbox as test_framework
dataset_dir = Path(self.args.dataset_dir)
if self.args.dataset_list is not None:
with open(self.args.dataset_list, 'r') as f:
test_files = list(f.read().splitlines())
else:
test_files = [file.relpathto(dataset_dir) for file in sum([dataset_dir.files('*.{}'.format(ext)) for ext in self.args.img_exts], [])]
print('{} files to test'.format(len(test_files)))
framework = test_framework(dataset_dir, test_files, 1,
self.args.min_depth, self.args.max_depth)
return framework
elif mode == 'test_posenet':
from kitti_eval.pose_evaluation_utils import test_framework_KITTI as test_framework
dataset_dir = Path(self.args.dataset_dir)
framework = test_framework(dataset_dir, self.args.sequences, seq_length)
return framework
elif mode == 'infer_dispnet':
dataset_dir = Path(self.args.dataset_dir)
if self.args.dataset_list is not None:
with open(self.args.dataset_list, 'r') as f:
test_files = [dataset_dir/file for file in f.read().splitlines()]
else:
test_files = sum([list(dataset_dir.walkfiles('*.{}'.format(ext))) for ext in self.args.img_exts], [])
return test_files
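# Hypothetical usage sketch; `args` is assumed to be an argparse.Namespace exposing the
# attributes referenced above (dataset_format, dataset_dir, seed, batch_size, workers,
# sequence_length, no_shuffle, with_gt, with_pose, ...):
#
# creator = DataLoaderCreator(args)
# train_loader = creator.create('train')
# val_loader = creator.create('validate')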
|
import numpy as np
from ..affine import power_L
def basil_inner_loop(path_obj,
lagrange_subseq,
initial_data, # (solution, grad) pair
inner_tol=1.e-5,
verbose=False,
initial_step=None,
check_active=False,
step_nvar=50,
candidate_set=None,
solve_args={}):
debug = True
coef_stop = False
# take a guess at the inverse step size
if initial_step is None:
_lipschitz = power_L(path_obj.X, max_its=50)
final_step = 1000. / _lipschitz
else:
final_step = initial_step
# gradient of restricted elastic net at lambda_max
solution, grad_solution = initial_data
solution = solution.copy()
last_grad = grad_solution.copy()
inactive_ranks = path_obj.check_KKT(grad_solution,
solution,
path_obj.alpha * np.min(lagrange_subseq))[1]
linear_predictor = path_obj.X.dot(solution)
    unpen = np.zeros(path_obj.group_shape, bool)
unpen[path_obj.unpenalized[1]] = True
ever_active = list(path_obj.updated_ever_active(unpen)) # unpenalized groups
solutions = []
lagrange_solved = []
    all_failing = np.zeros(path_obj.group_shape, bool)
    M = min(step_nvar, (inactive_ranks >= 0).sum())
    candidate_bool = np.zeros(path_obj.group_shape, bool)
subproblem_set = path_obj.updated_ever_active((inactive_ranks < M) * (inactive_ranks >= 0) +
unpen)
if candidate_set is not None:
subproblem_set = sorted(set(subproblem_set + candidate_set))
for lagrange in lagrange_subseq:
(final_step,
subproblem_grad,
subproblem_soln,
subproblem_linpred,
subproblem_vars) = path_obj.solve_subproblem(solution, # for warm start
subproblem_set,
lagrange,
tol=inner_tol,
start_step=final_step,
debug=debug and verbose,
coef_stop=coef_stop,
**solve_args)
saturated_grad = path_obj.saturated_loss.smooth_objective(subproblem_linpred, 'grad')
        # as the subproblem always contains the ever-active set,
        # the rest of the solution should be 0
solution[subproblem_vars] = subproblem_soln
enet_grad = path_obj.enet_grad(solution,
path_obj._penalized_vars,
lagrange)
grad_solution[:] = (path_obj.full_gradient(path_obj.saturated_loss,
subproblem_linpred) +
enet_grad)
failing_A, failing_I_ranks = path_obj.check_KKT(grad_solution,
solution,
path_obj.alpha * lagrange)
if check_active:
all_failing = failing_A + (failing_I_ranks >= 0)
else:
all_failing = failing_I_ranks >= 0
if not all_failing.sum():
ever_active_incr = path_obj.updated_ever_active(path_obj.active_set(solution))
ever_active = sorted(set(ever_active + ever_active_incr))
linear_predictor[:] = subproblem_linpred
solutions.append(solution.T.copy())
last_grad = grad_solution.copy()
lagrange_solved.append(lagrange)
else:
ever_active1 = ever_active + list(subproblem_set)
ever_active2 = path_obj.updated_ever_active(all_failing)
ever_active = sorted(set(ever_active1 + ever_active2))
break
return (lagrange_solved,
solutions,
last_grad,
ever_active)
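# The BASIL outer loop below repeatedly hands the next `step_lagrange` values of the
# lagrange sequence to `basil_inner_loop`, which solves the problem restricted to a
# screened subset of groups and checks the KKT conditions on the remaining ones;
# groups that fail the check are fed back as the candidate set for the next pass.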
def basil(path_obj,
lagrange_seq,
initial_data, # (solution, grad) pair
inner_tol=1.e-5,
verbose=False,
initial_step=None,
check_active=False,
step_nvar=50,
step_lagrange=5,
solve_args={}):
lagrange_solved, solutions, candidate_set = [np.inf], [], []
cur_data = initial_data
while True:
lagrange_start = np.sum(lagrange_seq >= np.min(lagrange_solved))
lagrange_cand = lagrange_seq[lagrange_start:(lagrange_start + step_lagrange)]
if len(lagrange_cand) > 0:
(lagrange_incr,
solution_incr,
last_grad,
candidate_set) = basil_inner_loop(path_obj,
lagrange_cand,
cur_data,
inner_tol=inner_tol,
step_nvar=step_nvar,
candidate_set=candidate_set,
solve_args=solve_args)
if len(solution_incr) > 0:
cur_soln = (solution_incr[-1], last_grad)
solutions.extend(solution_incr)
lagrange_solved.extend(lagrange_incr)
else:
break
return np.array(solutions)
|
'''
Calculate various quantities considered surface brightness such as:
- net counts
- net count Rate
- net photon flux
- net energy flux (two options)
see further documentation
parameters:
evt_file - classic event fits file (e.g. 'acsif_#####_repro_evt2')
if merged ('merged_evt')
energy_range - energy range in electron volts (e.g. 500:2000)
region - region of interest (e.g. 'simple')
background - background .reg file without extension (e.g. 'simple_bkg')
exposure - boolean to use exposure fluxes (e.g. True) (See documentation)
merged - boolean for merged data set or not (e.g. True)
outputs:
.par file containing all calculated quantities (.e.g. 'aprates_'+region+'.par')
Notes:
Usually we use the region name along with the arf files to calculate the monochromatic
energy, but if the data set is merged then we must use the evt_file name (see documentation).
This is handled in the code but be sure to name things appropriately!
'''
from ciao_contrib.runtool import *
from astropy.io import fits
def calc_flux(evt_file,energy_range,region,background,exposure = False,merged = False,merged_obs = ['']):
#Rearrange energy ranges
energies = [float(x) for x in energy_range.split(':')]
    energy_range2 = str(energies[0]/1000)+':'+str(energies[1]/1000) #for effective energy (keV)
    energy_range3 = str(energies[0]/1000)+'-'+str(energies[1]/1000) #for average effective exposures (keV)
#Get counts for region and background
print("Calculating all data needed to calculate flux")
dmextract.infile = evt_file+".fits[energy="+energy_range+"][bin sky=region("+region+".reg)]"
dmextract.outfile = region+'_counts.fits'
dmextract.opt = 'generic'
dmextract.bkg = evt_file+".fits[energy="+energy_range+"][bin sky=region("+background+".reg)]"
dmextract.clobber = True
dmextract()
dmstat.infile = region+'_counts.fits[cols counts]'
dmstat()
counts = float(dmstat.out_sum)
dmstat.infile = region+'_counts.fits[cols area]'
dmstat()
area = float(dmstat.out_sum)
dmstat.infile = region+'_counts.fits[cols bg_counts]'
dmstat()
bg_counts = float(dmstat.out_sum)
dmstat.infile = region+'_counts.fits[cols bg_area]'
dmstat()
bg_area = float(dmstat.out_sum)
#Set PSF elements
    alpha = 1 #PSF fraction in source aperture; 1-perfect
    beta = 0 #PSF fraction in background aperture; 0-perfect
#Exposure Time
if merged == False:
hdu = fits.open(evt_file+'.fits')
hdr = hdu[0].header
T_s = hdr['TSTOP']-hdr['TSTART']
T_b = T_s
hdu.close()
#Calculate exposure maps
effen = calc_effenergy(region,energy_range2)
#Create Exposure Map for the band of interest
fluximage.punlearn()
fluximage.infile = evt_file+".fits"
fluximage.outroot = region+"flux/"
fluximage.bands = energy_range2+":"+str(effen)
fluximage.binsize = "1"
fluximage.units = "default"
fluximage.clobber = True
fluximage.cleanup = True
fluximage()
dmstat.punlearn()
dmstat.infile = region+"flux/"+energy_range3+'_thresh.expmap[sky=region('+region+'.reg)]'
dmstat.centroid = False
dmstat()
E_s = dmstat.out_mean
dmstat.punlearn()
dmstat.infile = region+"flux/"+energy_range3+'_thresh.expmap[sky=region('+background+'.reg)]'
dmstat.centroid = False
dmstat()
E_b = dmstat.out_mean
if merged == True:
T_s = 0
T_b = 0
for obsid in merged_obs:
hdu = fits.open(obsid+'.fits')
hdr = hdu[0].header
            T_s += hdr['TSTOP']-hdr['TSTART']
            hdu.close()
        T_b = T_s
#Calculate average effective exposures
dmstat.punlearn()
dmstat.infile = energy_range3+'_thresh.expmap[sky=region('+region+'.reg)]'
dmstat.centroid = False
dmstat()
E_s = dmstat.out_mean
dmstat.punlearn()
dmstat.infile = energy_range3+'_thresh.expmap[sky=region('+background+'.reg)]'
dmstat.centroid = False
dmstat()
E_b = dmstat.out_mean
    #Calculate average photon energies in source and background aperture
if exposure == False:
dmtcalc.punlearn()
dmtcalc.infile = evt_file+".fits[energy="+energy_range+",sky=region("+region+".reg)]"
dmtcalc.outfile = region+"_source_energy.fits"
dmtcalc.expression = 'energy=1.6e-12*energy' #Convert to ergs
dmtcalc.clobber = True
dmtcalc()
dmstat.punlearn()
dmstat.infile = region+'_source_energy.fits[cols energy]'
dmstat()
eng_s = dmstat.out_mean
dmtcalc.punlearn()
dmtcalc.infile = evt_file+".fits[energy="+energy_range+",sky=region("+background+".reg)]"
dmtcalc.outfile = region+"_background_energy.fits"
dmtcalc.expression = 'energy=1.6e-12*energy' #Convert to ergs
dmtcalc.clobber = True
dmtcalc()
dmstat.punlearn()
dmstat.infile = region+'_background_energy.fits[cols energy]'
dmstat()
eng_b = dmstat.out_mean
        #set flux_s,flux_b to one to ignore exposure
flux_s = 1; flux_b = 1
if exposure == True:
eff2evt.punlearn()
eff2evt.infile = evt_file+".fits[energy="+energy_range+"][sky=region("+region+".reg)]"
eff2evt.outfile = region+"_source_effexp.fits"
eff2evt.clobber = True
eff2evt()
dmstat.punlearn()
dmstat.infile = region+'_source_effexp.fits[cols flux]'
dmstat()
flux_s = dmstat.out_mean
eff2evt.punlearn()
eff2evt.infile = evt_file+".fits[energy="+energy_range+"][sky=region("+background+".reg)]"
eff2evt.outfile = region+"_background_effexp.fits"
eff2evt.clobber = True
eff2evt()
dmstat.punlearn()
dmstat.infile = region+'_background_effexp.fits[cols flux]'
dmstat()
flux_b = dmstat.out_mean
#Conversely set eng_s,eng_b to one to signify we are using effective exposure
eng_s = 1; eng_b = 1
#Calculate energy flux and bounds
print("Setting aprates values")
aprates.punlearn()
aprates.conf = 0.90
aprates.n = counts
aprates.m = bg_counts
aprates.A_s = area
aprates.A_b = bg_area
aprates.alpha = alpha
aprates.beta = beta
aprates.T_s = T_s
aprates.T_b = T_b
aprates.E_s = E_s
aprates.E_b = E_b
aprates.eng_s = eng_s
aprates.eng_b = eng_b
aprates.flux_s = flux_s
aprates.flux_b = flux_b
aprates.outfile = 'aprates_'+region+'.par'
aprates.clobber = True
aprates.pdf = 'alternate'
print("Running aprates for flux value")
aprates()
return None
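# Hypothetical usage sketch (file, region and energy-range values are placeholders,
# not taken from the source); requires a CIAO environment with the event file and
# .reg files in the working directory:
#
# calc_flux('acisf01234_repro_evt2', '500:2000', 'simple', 'simple_bkg',
#           exposure=False, merged=False)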
|
import primes
import matplotlib.pyplot as plt
P = primes.prange(1001)
Y = []
for i in range(len(P)-1):
Y.append(P[i+1]-P[i])
plt.plot(Y,'x')
plt.xlabel('n')
plt.ylabel('p(n)-p(n-1)')
plt.show()
|
# (C) Copyright 2021 Hewlett Packard Enterprise Development LP.
# Aruba Fabric Composer classes
import requests
from requests.auth import HTTPBasicAuth
from requests.structures import CaseInsensitiveDict
import classes.classes
import urllib3
import json
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def obtainafcToken(afcipaddress,afcusername,afcpassword):
response={}
try:
url="https://" + afcipaddress + "/api/auth/token"
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
headers['accept'] = "application/json; version=1.0"
headers["X-Auth-Username"] = afcusername
headers["X-Auth-Password"] = afcpassword
headers["Content-Length"] = "0"
response = requests.post(url, headers=headers, verify=False)
return response.text
except ConnectionError:
response.update({"message":"Connection error, no response from AFC"})
return response
except Exception as err:
response.update({"message":"Failed to establish a connection to AFC"})
return response
def checkafcToken(afcipaddress, afctoken):
response={}
try:
url="https://" + afcipaddress + "/api/ping"
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
headers['accept'] = "application/json; version=1.0"
headers["Authorization"] = afctoken
try:
result = requests.get(url, headers=headers, verify=False, timeout=2)
except:
response.update({"message":"No response from AFC"})
return response
if result:
response.update({"status_code": result.status_code})
else:
response.update({"message":"No response from AFC"})
return response
except ConnectionError:
response.update({"message":"Connection error, no response from AFC"})
return response
except Exception as err:
response.update({"message":"Failed to establish a connection to AFC"})
return response
except:
response.update({"message":"Failed to establish a connection to AFC"})
return response
def getRestafc(url):
afcvars=classes.classes.obtainVars('sysafc')
response={}
if len(afcvars)>0:
if isinstance(afcvars,str):
afcvars=json.loads(afcvars)
if not "afctoken" in afcvars:
response= obtainafcToken(afcvars['afcipaddress'],afcvars['afcusername'], afcvars['afcpassword'])
if isinstance(response,str):
response=json.loads(response)
afcvars.update({"afctoken":response['result']})
queryStr="update systemconfig set datacontent='{}' where configtype='sysafc'".format(json.dumps(afcvars))
classes.classes.sqlQuery(queryStr,"update")
url="https://" + afcvars['afcipaddress'] + "/" + url
result={}
headers = CaseInsensitiveDict()
headers["Content-Type"] = "application/json"
headers['accept'] = "application/json; version=1.0"
headers["Authorization"] = afcvars['afctoken']
response=checkafcToken(afcvars['afcipaddress'],afcvars['afctoken'])
if "status_code" in response:
if response['status_code']==204:
# Token seems to be valid, we can issue the query
response = requests.get(url, headers=headers, verify=False)
response = response.json()
if response['result']=="Authentication credential is not valid; please log in again":
# Authorization token is still not valid. We need to refresh
response= obtainafcToken(afcvars['afcipaddress'], afcvars['afcusername'], afcvars['afcpassword'])
if isinstance(response,str):
response = json.loads(response)
if "count" in response:
# Result is the afctoken. We need to update the afcvars
afcvars.update({"afctoken":response['result']})
queryStr="update systemconfig set datacontent='{}' where configtype='sysafc'".format(json.dumps(afcvars))
classes.classes.sqlQuery(queryStr,"update")
# And issue the get request again
headers["Authorization"] = afcvars['afctoken']
response = requests.get(url, headers=headers, verify=False)
response = response.json()
else:
# Statuscode is not 204, try to refresh the token
                response= obtainafcToken(afcvars['afcipaddress'], afcvars['afcusername'], afcvars['afcpassword'])
if isinstance(response,str):
response = json.loads(response)
if "count" in response:
# Result is the afctoken. We need to update the afcvars
afcvars.update({"afctoken":response['result']})
queryStr="update systemconfig set datacontent='{}' where configtype='sysafc'".format(json.dumps(afcvars))
classes.classes.sqlQuery(queryStr,"update")
# And issue the get request again
headers["Authorization"] = afcvars['afctoken']
response = requests.get(url, headers=headers, verify=False)
response = response.json()
else:
# There is something wrong with the connectivity, return an error
response.update({"message":"Unable to obtain information, verify AFC credentials"})
else:
response.update({"message":"No AFC integration information available"})
return response
def getafcSwitches(formresult):
afcswitches=[]
formresult=formresult.to_dict(flat=True)
try:
queryStr="select jsondata,message from afc where infotype='fabrics'"
fabricInfo=classes.classes.sqlQuery(queryStr,"selectone")
afcfabrics=json.loads(fabricInfo['jsondata'])
except:
afcfabrics=[]
try:
queryStr="select jsondata,message from afc where infotype='switches'"
switchInfo=classes.classes.sqlQuery(queryStr,"selectone")
afcSwitches=json.loads(switchInfo['jsondata'])
except Exception as e:
print(e)
afcSwitches=[]
totalentries=0
if formresult and len(afcSwitches)>0:
entryperpage=int(formresult['entryperpage'])
if not "pageoffset" in formresult:
pageoffset=0
else:
pageoffset=int(formresult['pageoffset'])-1
afcfabric=formresult['afcfabric']
try:
if switchInfo['jsondata']!='"Authentication token header required"':
# First go through the list and filter on the fabric (if this is selected)
if afcfabric=="allfabrics":
if len(afcSwitches)>0:
totalentries=len(afcSwitches)
afcswitches=[afcSwitches[i:i+entryperpage] for i in range(0, len(afcSwitches), entryperpage)][pageoffset]
else:
totalentries=0
elif afcfabric=="unassigned":
# We need to filter out the switches that are not a member of the any fabric
if len(afcSwitches)>0:
for items in afcSwitches:
if items['fabric_uuid']=="":
# This switch is not assigned to any fabric. Add it to the (new) list of dicts
afcswitches.append(items)
totalentries=len(afcswitches)
if totalentries>0:
afcswitches=[afcswitches[i:i+entryperpage] for i in range(0, len(afcswitches), entryperpage)][pageoffset]
else:
# A fabric is selected, we need to filter out the switches that are not a member of the selected fabric
# the afcfabric uuid is the fabric uuid, this needs to be assigned to the switches
if len(afcSwitches)>0:
for items in afcSwitches:
if items['fabric_uuid']==afcfabric:
# This switch is assigned to the fabric. Add it to the (new) list of dicts
afcswitches.append(items)
totalentries=len(afcswitches)
if totalentries>0:
afcswitches=[afcswitches[i:i+entryperpage] for i in range(0, len(afcswitches), entryperpage)][pageoffset]
else:
# There is no valid information in the switch Information
afcswitches=[]
jsonData={}
jsonData['message']=switchInfo['jsondata']
afcswitches.append(jsonData)
afcfabric="allfabrics"
except:
entryperpage=10
pageoffset=0
afcswitches=[]
jsonData={}
jsonData['message']="No switch information"
afcswitches.append(jsonData)
afcfabric="allfabrics"
else:
entryperpage=10
pageoffset=0
try:
queryStr="select jsondata,message from afc where infotype='switches'"
switchInfo=classes.classes.sqlQuery(queryStr,"selectone")
if switchInfo['jsondata']!='"Authentication token header required"':
jsonData=json.loads(switchInfo['jsondata'])
if len(jsonData)>0:
totalentries=len(jsonData)
# and we should only show the first 10 switches in the list
afcswitches=[jsonData[i:i+entryperpage] for i in range(0, len(jsonData), entryperpage)][pageoffset]
else:
afcswitches=[]
jsonData={}
jsonData['message']=switchInfo['jsondata']
afcswitches.append(jsonData)
afcfabric="allfabrics"
except:
afcswitches=[]
jsonData={}
jsonData['message']="No switch information"
afcswitches.append(jsonData)
afcfabric="allfabrics"
return {'afcswitches':afcswitches, 'afcfabrics': afcfabrics, 'afcfabric': afcfabric, 'totalentries': totalentries, 'pageoffset': pageoffset, 'entryperpage': entryperpage}
def afcauditInfo(formresult):
formresult=formresult.to_dict(flat=True)
totalentries=0
if formresult:
entryperpage=formresult['entryperpage']
if not "pageoffset" in formresult:
pageoffset=0
else:
pageoffset=int(formresult['pageoffset'])-1
# Need to check if there are any search criteria. If not just select the last entries from the table
if formresult['searchRecordtype']!="" or formresult['searchStreamid']!="" or formresult['searchSeverity']!="" or formresult['searchDescription']!="":
constructQuery = " where"
else:
constructQuery = " "
if formresult['searchRecordtype']:
constructQuery += " record_type like'%" + formresult['searchRecordtype'] + "%' AND "
if formresult['searchStreamid']:
constructQuery += " stream_id like '%" + formresult['searchStreamid'] + "%' AND "
if formresult['searchSeverity']:
constructQuery += " severity='" + formresult['searchSeverity'] + "' AND "
if formresult['searchDescription']:
constructQuery += " description like'%" + formresult['searchDescription'] + "%' AND "
# We have to construct the query based on the formresult information (entryperpage, totalpages, pageoffset)
queryStr="select COUNT(*) as totalentries from afcaudit " + constructQuery[:-4]
navResult=classes.classes.navigator(queryStr,formresult)
totalentries=navResult['totalentries']
entryperpage=navResult['entryperpage']
# If the entry per page value has changed, need to reset the pageoffset
if formresult['entryperpage']!=formresult['currententryperpage']:
pageoffset=0
else:
pageoffset=navResult['pageoffset']
# We have to construct the query based on the formresult information (entryperpage, totalpages, pageoffset)
queryStr = "select * from afcaudit " + constructQuery[:-4] + " ORDER BY log_date DESC LIMIT {} offset {}".format(entryperpage,pageoffset)
afcauditInfo=classes.classes.sqlQuery(queryStr,"select")
else:
entryperpage=10
pageoffset=0
queryStr="select count(*) as totalCount from afcaudit"
auditCount=classes.classes.sqlQuery(queryStr,"selectone")
totalentries=auditCount['totalCount']
queryStr="SELECT * FROM afcaudit ORDER BY log_date DESC LIMIT {} OFFSET 0".format(entryperpage)
afcauditInfo=classes.classes.sqlQuery(queryStr,"select")
return {'auditInfo':afcauditInfo, 'totalentries': totalentries, 'pageoffset': pageoffset, 'entryperpage': entryperpage}
def afcswitchInfo(uuid):
afcswitchInfo={}
afcswitchInfo['info']={}
afcswitchInfo['portInfo']={}
queryStr="select jsondata,message from afc where infotype='switches'"
switchInfo=classes.classes.sqlQuery(queryStr,"selectone")
jsonData=json.loads(switchInfo['jsondata'])
# Obtain the right switch from the list
for items in jsonData:
if items['uuid']==uuid:
# This is the switch, return the information to the calling function
# Extract the port information and order the interface list
portInfo=sorted(items['ports'], key=lambda d: int(d['silkscreen']))
afcswitchInfo['info']=items.copy()
afcswitchInfo['portInfo']=portInfo.copy()
return afcswitchInfo
def afcvmwareInventory():
# Obtain the host information
queryStr="select jsondata,message from afc where infotype='vmwareinventory'"
vmInfo=classes.classes.sqlQuery(queryStr,"selectone")
if vmInfo==None:
vmInfo={}
vmInfo['jsondata']=[]
vmInfo['message']="No VMWare inventory information available"
vmTree=[]
else:
vmTree=[]
jsonData=json.loads(vmInfo['jsondata'])
for items in jsonData:
for items2 in items['hosts']:
vmConstruct = {}
# Host info
vmDict={}
vmConstruct['name'] = items2['name']
vmConstruct['itemtype']="host"
vmConstruct['power_state'] = items2['power_state']
vmConstruct['uuid'] = items2['uuid']
vmDict2={}
vmDict3={}
vmDict4={}
vmDict5={}
vmConstruct['children']=[]
for index, items3 in enumerate(items2['nics']):
# We need to create dictionaries in this list. This dictionary contains a data dictionary and children list(s)
vmDict[index]={}
vmDict[index]['itemtype']="nic"
                    vmDict[index]['name'] = items3['name']
                    vmDict[index]['uuid'] = items3['uuid']
                    vmDict[index]['mac_address'] = items3['mac_address']
                    vmDict[index]['ip_address'] = items3['ip_address']
                    vmDict[index]['vlan'] = items3['vlan']
                    vmDict[index]['vni'] = items3['vni']
                    vmDict[index]['vtep'] = items3['vtep']
vmConstruct['children'].append(vmDict[index].copy())
for index2, items4 in enumerate(items3['portgroups']):
vmConstruct['children'][index]['children']=[]
vmDict2[index2]={}
vmDict2[index2]['itemtype']="portgroup"
                        vmDict2[index2]['name'] = items4['name']
                        vmDict2[index2]['uuid'] = items4['uuid']
                        vmDict2[index2]['type'] = items4['type']
                        vmDict2[index2]['vlans'] = items4['vlans']
vmConstruct['children'][index]['children'].append(vmDict2[index2].copy())
# There is only one vswitch assigned to a port group, therefore no for-next
vmConstruct['children'][index]['children'][index2]['children']=[]
vmDict3['itemtype']="vswitch"
vmDict3['name']=items4['vswitch']['name']
vmDict3['uuid']=items4['vswitch']['uuid']
vmDict3['type']=items4['vswitch']['type']
vmConstruct['children'][index]['children'][index2]['children'].append(vmDict3.copy())
vmConstruct['children'][index]['children'][index2]['children'][0]['children']=[]
for index3, items5 in enumerate(items4['vswitch']['nic']):
vmDict4[index3]={}
vmDict4[index3]['itemtype']="vnic"
                            vmDict4[index3]['name'] = items5['name']
                            vmDict4[index3]['link_speed'] = items5['link_speed']
                            vmDict4[index3]['connection_status'] = items5['connection_status']
                            vmDict4[index3]['uuid'] = items5['uuid']
                            vmDict4[index3]['mac_address'] = items5['mac_address']
vmDict4[index3]['children']=[]
if len(items5['switch'])>0:
vmDict5={}
vmDict5['itemtype']="switch"
                                vmDict5['name'] = items5['switch']['hostname']
                                vmDict5['switch_port_id'] = items5['switch_port_id']
                                vmDict5['uuid'] = items5['switch']['uuid']
                                vmDict5['ip_address'] = items5['switch']['ip_address']
                                vmDict5['mac_address'] = items5['switch']['mac_address']
                                vmDict5['serial_number'] = items5['switch']['serial_number']
                                vmDict5['description'] = items5['switch']['description']
                                vmDict5['role'] = items5['switch']['role']
                                vmDict5['fabric'] = items5['switch']['fabric']
                                vmDict5['fabric_class'] = items5['switch']['fabric_class']
vmDict4[index3]['children'].append(vmDict5.copy())
vmConstruct['children'][index]['children'][index2]['children'][0]['children'].append(vmDict4[index3].copy())
vmTree.append(vmConstruct)
return json.dumps(vmTree)
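# Hypothetical usage sketch (the REST path below is an assumption, not taken from the
# source); getRestafc prepends the stored AFC address and handles token refresh:
#
# switches = getRestafc('api/switches')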
|
import functools
import sys
import types
import inspect
__all__ = [
'partial_cls',
'register_partial_cls'
]
def partial_cls(base_cls, name, module, fix=None, default=None):
# helper function
def insert_if_not_present(dict_a, dict_b):
for kw,val in dict_b.items():
if kw not in dict_a:
dict_a[kw] = val
return dict_a
# helper function
def insert_call_if_present(dict_a, dict_b, callback):
for kw,val in dict_b.items():
if kw not in dict_a:
dict_a[kw] = val
else:
callback(kw)
return dict_a
# helper class
class PartialCls(object):
def __init__(self, base_cls, name, module, fix=None, default=None):
self.base_cls = base_cls
self.name = name
self.module = module
self.fix = [fix, {}][fix is None]
self.default = [default, {}][default is None]
if self.fix.keys() & self.default.keys():
raise TypeError('fix and default share keys')
            # remove bound keyword arguments
self._allowed_kw = self._get_allowed_kw()
def _get_allowed_kw(self):
argspec = inspect.getfullargspec(base_cls.__init__)
args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations = argspec
if varargs is not None:
raise TypeError('partial_cls can only be used if __init__ has no varargs')
if varkw is not None:
raise TypeError('partial_cls can only be used if __init__ has no varkw')
if kwonlyargs is not None and kwonlyargs != []:
raise TypeError('partial_cls can only be used without kwonlyargs')
if args is None or len(args) < 1:
raise TypeError('seems like self is missing')
return [kw for kw in args[1:] if kw not in self.fix]
def _build_kw(self, args, kwargs):
# handle *args
if len(args) > len(self._allowed_kw):
                raise TypeError("too many arguments")
all_args = {}
for arg, akw in zip(args, self._allowed_kw):
all_args[akw] = arg
# handle **kwargs
intersection = self.fix.keys() & kwargs.keys()
if len(intersection) >= 1:
kw = intersection.pop()
raise TypeError("`{}.__init__` got unexpected keyword argument '{}'".format(name, kw))
def raise_cb(kw):
raise TypeError("{}.__init__ got multiple values for argument '{}'".format(name, kw))
all_args = insert_call_if_present(all_args, kwargs, raise_cb)
# handle fixed arguments
def raise_cb(kw):
raise TypeError()
all_args = insert_call_if_present(all_args, self.fix, raise_cb)
# handle defaults
all_args = insert_if_not_present(all_args, self.default)
# handle fixed
all_args.update(self.fix)
return all_args
def build_cls(self):
def new_init(self_of_new_cls, *args, **kwargs):
combined_args = self._build_kw(args=args, kwargs=kwargs)
#call base cls init
super(self_of_new_cls.__class__, self_of_new_cls).__init__(**combined_args)
return type(name, (self.base_cls,), {
'__module__': self.module,
'__init__' : new_init
})
return PartialCls(base_cls=base_cls, name=name, module=module,
fix=fix, default=default).build_cls()
def register_partial_cls(base_cls, name, module, fix=None, default=None):
module_dict = sys.modules[module].__dict__
generatedClass = partial_cls(base_cls=base_cls,name=name, module=module,
fix=fix, default=default)
module_dict[generatedClass.__name__] = generatedClass
del generatedClass
if __name__ == "__main__":
class Conv(object):
def __init__(self, dim, activation, stride=1):
print(f"dim {dim} act {activation} stride {stride}")
Conv2D = partial_cls(Conv,'Conv2D',__name__, fix=dict(dim=2), default=dict(stride=2))
#obj = Conv2D(activation='a')
#obj = Conv2D('a',activation='a', stride=3)
obj = Conv2D('fu','bar')
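    # register_partial_cls builds the same kind of class and binds it into a module's
    # namespace (hypothetical example, not part of the original demo):
    # register_partial_cls(Conv, 'Conv3D', __name__, fix=dict(dim=3))
    # obj3d = Conv3D(activation='relu')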
|
# create executable binary from python code, toy example
# inspired by http://masnun.rocks/2016/10/01/creating-an-executable-file-using-cython/
# cython --embed -o hello_world.c hello_world.py
# gcc -v -Os \
# -L /usr/local/Frameworks/Python.framework/Versions/3.7/lib/ \
# -I /usr/local/Frameworks/Python.framework/Versions/3.7/include/python3.7m/ \
# -lpython3.7 -lpthread -lm -lutil -ldl
# -o hello \
# hello_world.c
#gcc -v -Os -I /Users/xxx/.pyenv/versions/3.5.1/include/python3.5m -L /usr/local/Frameworks/Python.framework/Versions/3.5/lib -o test test.c -lpython3.5 -lpthread -lm -lutil -ldl
print("Hello World!")
|
pre = '/Users/phr/Desktop'
# train_file = pre+'/Parser/files/small-bin.txt'
# train_file = pre+'/Parser/files/medium.txt'
dev_file = pre+'/Parser/files/small-dev.txt'
train_file = pre+'/Parser/files/train.txt'
# train_file = pre+'/Parser/files/dev.txt'
output_dir = pre+'/Parser/output/'
import os
if not os.path.exists(output_dir):
os.makedirs(output_dir)
print('Output directory created.')
nonterminal_cutoff = 10
terminal_cutoff = 5
train = None
dev = None
nonterminal_map = None
terminal_map = None
pcfg = None
I, O = None, None
Inode, Onode = None, None
singular_value_cutoff = 0.01
max_state = 32
proj = None
lpcfg = None
rule3s_lookupC = None
rule1s_lookup = None
# Smooth params
C = 30
v = 0.6
unary_cutoff = 1000
prune_cutoff = 1e-5
numba_ready = False
import torch
def save():
print('Saving parameters.')
torch.save(nonterminal_map, output_dir+'nonterminal_map.pt')
torch.save(terminal_map, output_dir + 'terminal_map.pt')
torch.save(pcfg, output_dir + 'pcfg.pt')
torch.save(lpcfg, output_dir + 'lpcfg.pt')
torch.save(rule3s_lookupC, output_dir + 'rule3s_lookupC.pt')
torch.save(rule1s_lookup, output_dir + 'rule1s_lookup.pt')
print('Done!')
def load():
global nonterminal_map, terminal_map
global pcfg, lpcfg
global rule3s_lookupC, rule1s_lookup
nonterminal_map = torch.load(output_dir+'nonterminal_map.pt')
terminal_map = torch.load(output_dir+'terminal_map.pt')
pcfg = torch.load(output_dir+'pcfg.pt')
lpcfg = torch.load(output_dir + 'lpcfg.pt')
rule3s_lookupC = torch.load(output_dir+'rule3s_lookupC.pt')
rule1s_lookup = torch.load(output_dir+'rule1s_lookup.pt')
|
import os
import numpy.ctypeslib as npct
from ctypes import *
import csv
import numpy as np
from contextlib import contextmanager
import sys
def getColumn(filename, column):
results = csv.reader(open(filename), delimiter=",")
# next(results, None) # skip the headers
return [result[column] for result in results]
# This function converts an input numpy array into a different
# data type and ensures that it is contiguous.
def convert_type(in_array, new_dtype):
ret_array = in_array
if not isinstance(in_array, np.ndarray):
ret_array = np.array(in_array, dtype=new_dtype)
elif in_array.dtype != new_dtype:
ret_array = np.array(ret_array, dtype=new_dtype)
if ret_array.flags['C_CONTIGUOUS'] == False:
ret_array = np.ascontiguousarray(ret_array)
return ret_array
#from stackoverflow
# https://stackoverflow.com/questions/5081657/how-do-i-prevent-a-c-shared-library-to-print-on-stdout-in-python
def redirect_stdout():
print ("Verbose mode is false. Redirecting C shared library stdout to /dev/null")
sys.stdout.flush() # <--- important when redirecting to files
newstdout = os.dup(1)
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, 1)
os.close(devnull)
sys.stdout = os.fdopen(newstdout, 'w')
#from stackoverflow
# https://stackoverflow.com/questions/5081657/how-do-i-prevent-a-c-shared-library-to-print-on-stdout-in-python
@contextmanager
def stdout_redirected(to=os.devnull):
'''
import os
with stdout_redirected(to=filename):
print("from Python")
os.system("echo non-Python applications are also supported")
'''
print ("Verbose mode is false. Redirecting C shared library stdout to /dev/null")
fd = sys.stdout.fileno()
##### assert that Python and C stdio write using the same file descriptor
####assert libc.fileno(ctypes.c_void_p.in_dll(libc, "stdout")) == fd == 1
def _redirect_stdout(to):
sys.stdout.close() # + implicit flush()
os.dup2(to.fileno(), fd) # fd writes to 'to' file
sys.stdout = os.fdopen(fd, 'w') # Python writes to fd
with os.fdopen(os.dup(fd), 'w') as old_stdout:
with open(to, 'w') as file:
_redirect_stdout(to=file)
try:
yield # allow code to be run with the redirected stdout
finally:
_redirect_stdout(to=old_stdout) # restore stdout.
# buffering and flags such as
# CLOEXEC may be different
#from here: https://www.py4u.net/discuss/15884
class SuppressStream(object):
def __init__(self, stream=sys.stderr):
self.orig_stream_fileno = stream.fileno()
def __enter__(self):
self.orig_stream_dup = os.dup(self.orig_stream_fileno)
self.devnull = open(os.devnull, 'w')
os.dup2(self.devnull.fileno(), self.orig_stream_fileno)
def __exit__(self, type, value, traceback):
os.close(self.orig_stream_fileno)
os.dup2(self.orig_stream_dup, self.orig_stream_fileno)
os.close(self.orig_stream_dup)
self.devnull.close()
#https://stackoverflow.com/questions/5081657/how-do-i-prevent-a-c-shared-library-to-print-on-stdout-in-python
class HideOutput(object):
'''
    A context manager that blocks stdout for its scope, usage:
with HideOutput():
os.system('ls -l')
'''
def __init__(self, *args, **kw):
sys.stdout.flush()
self._origstdout = sys.stdout
self._oldstdout_fno = os.dup(sys.stdout.fileno())
self._devnull = os.open(os.devnull, os.O_WRONLY)
def __enter__(self):
self._newstdout = os.dup(1)
os.dup2(self._devnull, 1)
os.close(self._devnull)
sys.stdout = os.fdopen(self._newstdout, 'w')
def __exit__(self, exc_type, exc_val, exc_tb):
sys.stdout = self._origstdout
sys.stdout.flush()
os.dup2(self._oldstdout_fno, 1)
def computeIndexRangesForEachObject(objId):
start_index_arr=[]
end_index_arr=[]
unique_obj_ids_arr=[]
lastId=objId[0]
unique_obj_ids_arr.append(objId[0])
start_index_arr.append(0)
for x in range(0, len(objId)):
if(objId[x]!=lastId):
end_index_arr.append(x-1)
start_index_arr.append(x)
lastId=objId[x]
#update the list of unique object ids
unique_obj_ids_arr.append(objId[x])
#last one needs to be populated
end_index_arr.append(len(objId)-1)
start_index_arr=np.asarray(start_index_arr, dtype=int)
end_index_arr=np.asarray(end_index_arr, dtype=int)
unique_obj_ids_arr=np.asarray(unique_obj_ids_arr)
return start_index_arr, end_index_arr, unique_obj_ids_arr
#https://www.codeforests.com/2020/11/05/python-suppress-stdout-and-stderr/
@contextmanager
def nullify_output(suppress_stdout=True, suppress_stderr=True):
stdout = sys.stdout
stderr = sys.stderr
devnull = open(os.devnull, "w")
try:
if suppress_stdout:
sys.stdout = devnull
if suppress_stderr:
sys.stderr = devnull
yield
finally:
if suppress_stdout:
sys.stdout = stdout
if suppress_stderr:
sys.stderr = stderr
def enumerateObjects(start_index_arr, end_index_arr):
enumObjectId=[]
for x in range (start_index_arr.size):
numElems=end_index_arr[x]-start_index_arr[x]+1
# print("Num elems: %d" %(numElems))
enumObjectId.extend(numElems*[x])
enumObjectId=np.asarray(enumObjectId, dtype=int)
# print("Total number of lines after enumeration: %d" %(enumObjectId.size))
# print("Total number of unique objects after enumeration: %d" %(np.unique(enumObjectId).size))
return enumObjectId
#Use the formulation in Richards et al. 2011
def computeNumFreqAuto(objId, timeX, fmin, fmax):
start_index_arr, end_index_arr, _ = computeIndexRangesForEachObject(objId)
timeXLocal=np.asfarray(timeX)
observing_window_arr=[]
for x in range (0, start_index_arr.size):
idxStart=start_index_arr[x]
idxEnd=end_index_arr[x]
observing_window_arr.append(timeXLocal[idxEnd]-timeXLocal[idxStart])
observing_window_arr=np.asarray(observing_window_arr, dtype=float)
maximumObservingWindow=np.max(observing_window_arr)
deltaf=0.1/maximumObservingWindow
num_freqs=(fmax-fmin)/deltaf
num_freqs=int(num_freqs)
print("*********************")
print("Automatically generating the number of frequencies based on maximum observing window:")
print("Max. Observing Window: %f, Delta f: %f" %(maximumObservingWindow, deltaf))
print("Number of frequencies: ", num_freqs)
print("*********************")
return num_freqs
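# Worked example of the heuristic above (illustrative numbers, not from the source):
# with a maximum observing window of 1000 days, deltaf = 0.1/1000 = 1e-4, so scanning
# fmin = 0.01 to fmax = 10.0 (cycles/day) yields (10.0 - 0.01)/1e-4 = 99900 frequencies.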
#wrapper to enable the verbose option
def lombscargle(objId, timeX, magY, minFreq, maxFreq, error, mode, magDY=None, freqToTest="auto", dtype="float", verbose=False):
if(verbose==False):
with HideOutput():
ret_uniqueObjectIdsOrderedWrapper, ret_periodsWrapper, ret_pgramWrapper = lombscarglemain(objId, timeX, magY, minFreq, maxFreq, error, mode, magDY, freqToTest, dtype)
else:
ret_uniqueObjectIdsOrderedWrapper, ret_periodsWrapper, ret_pgramWrapper = lombscarglemain(objId, timeX, magY, minFreq, maxFreq, error, mode, magDY, freqToTest, dtype, verbose)
return ret_uniqueObjectIdsOrderedWrapper, ret_periodsWrapper, ret_pgramWrapper
#main L-S function
def lombscarglemain(objId, timeX, magY, minFreq, maxFreq, error, mode, magDY=None, freqToTest="auto", dtype="float", verbose=False):
#store the minimum/maximum frequencies (needed later for period calculation)
minFreqStandard=minFreq
maxFreqStandard=maxFreq
#convert oscillating frequencies into angular
minFreq=2.0*np.pi*minFreq
maxFreq=2.0*np.pi*maxFreq
###############################
#Check for valid parameters and set verbose mode and generate frequencies for auto mode
#prevent C output from printing to screen
# if (verbose==False):
# redirect_stdout()
# stdout_redirected()
#if the user doesn't specify the number of frequencies
if (freqToTest=="auto"):
freqToTest=computeNumFreqAuto(objId, timeX, minFreqStandard, maxFreqStandard)
#check which mode to use in the C shared library
    # 1- GPU Batch of Objects Lomb-Scargle
    # 2- GPU Single Object Lomb-Scargle
    # 3- None
    # 4- CPU Batch of Objects Lomb-Scargle
    # 5- CPU Single Object Lomb-Scargle
numObjects=np.size(np.unique(objId))
if (mode=="GPU" and numObjects>1):
setmode=1
elif (mode=="GPU" and numObjects==1):
setmode=2
elif (mode=="CPU" and numObjects>1):
setmode=4
elif (mode=="CPU" and numObjects==1):
setmode=5
    #check that, if error is True, magDY is not None (None is the default parameter)
    if (error==True and magDY is None):
print("[Python] Error: No input error array, but the error mode is True. Set error mode to False.")
exit(0)
###############################
#enumerate objId so that we can process objects with non-numeric Ids
#original objects are stored in ret_uniqueObjectIdsOrdered
start_index_arr, end_index_arr, ret_uniqueObjectIdsOrdered = computeIndexRangesForEachObject(objId)
objId = enumerateObjects(start_index_arr, end_index_arr)
# Create variables that define C interface
array_1d_double = npct.ndpointer(dtype=c_double, ndim=1, flags='CONTIGUOUS')
array_1d_float = npct.ndpointer(dtype=c_float, ndim=1, flags='CONTIGUOUS')
array_1d_unsigned = npct.ndpointer(dtype=c_uint, ndim=1, flags='CONTIGUOUS')
#load the shared library (either the noerror/error and float/double versions)
lib_path = os.getcwd()
if (error==False and dtype=="float"):
liblombscarglenoerrorfloat = npct.load_library('libpylsnoerrorfloat.so', lib_path)
elif (error==False and dtype=="double"):
liblombscarglenoerrordouble = npct.load_library('libpylsnoerrordouble.so', lib_path)
elif (error==True and dtype=="float"):
liblombscargleerrorfloat = npct.load_library('libpylserrorfloat.so', lib_path)
elif (error==True and dtype=="double"):
liblombscargleerrordouble = npct.load_library('libpylserrordouble.so', lib_path)
#total number of rows in file
sizeData=len(objId)
print("[Python] Number of rows in file: %d" %(sizeData))
#convert input from lists to numpy arrays
objId=np.asarray(objId, dtype=int)
timeX=np.asfarray(timeX)
magY=np.asfarray(magY)
if(error==True):
magDY=np.asfarray(magDY)
#if error is false, we still need to send dummy array to C shared library
#set all values to 1.0, although we don't use it for anything
if(error==False):
magDY=np.full(objId.size, 1.0)
#convert to CTYPES
if (dtype=="float"):
c_objId=convert_type(objId, c_uint)
c_timeX=convert_type(timeX, c_float)
c_magY=convert_type(magY, c_float)
c_magDY=convert_type(magDY, c_float)
elif (dtype=="double"):
c_objId=convert_type(objId, c_uint)
c_timeX=convert_type(timeX, c_double)
c_magY=convert_type(magY, c_double)
c_magDY=convert_type(magDY, c_double)
df=(maxFreq-minFreq)/freqToTest*1.0
dfstandard=(maxFreqStandard-minFreqStandard)/freqToTest*1.0
# Allocate arrays for results
uniqueObjects=np.size(np.unique(objId))
print("[Python] Unique objects: %d" % (uniqueObjects))
if (dtype=="float"):
ret_pgram = np.zeros(uniqueObjects*freqToTest, dtype=c_float)
pgramDataGiB=((ret_pgram.size*4.0)/(1024*1024*1024))
elif (dtype=="double"):
ret_pgram = np.zeros(uniqueObjects*freqToTest, dtype=c_double)
pgramDataGiB=((ret_pgram.size*8.0)/(1024*1024*1024))
print("[Python] Size of pgram in elems: %d (%f GiB)" %(ret_pgram.size, pgramDataGiB))
#without error -- float
if (error==False and dtype=="float"):
#define the argument types
liblombscarglenoerrorfloat.LombScarglePy.argtypes = [array_1d_unsigned, array_1d_float, array_1d_float,
array_1d_float, c_uint, c_double, c_double, c_uint, c_int, array_1d_float]
#call the library
liblombscarglenoerrorfloat.LombScarglePy(c_objId, c_timeX, c_magY, c_magDY, c_uint(sizeData), c_double(minFreq), c_double(maxFreq), c_uint(freqToTest), c_int(setmode), ret_pgram)
#without error -- double
if (error==False and dtype=="double"):
#define the argument types
liblombscarglenoerrordouble.LombScarglePy.argtypes = [array_1d_unsigned, array_1d_double, array_1d_double,
array_1d_double, c_uint, c_double, c_double, c_uint, c_int, array_1d_double]
#call the library
liblombscarglenoerrordouble.LombScarglePy(c_objId, c_timeX, c_magY, c_magDY, c_uint(sizeData), c_double(minFreq), c_double(maxFreq), c_uint(freqToTest), c_int(setmode), ret_pgram)
#with error -- float
if (error==True and dtype=="float"):
#define the argument types
liblombscargleerrorfloat.LombScarglePy.argtypes = [array_1d_unsigned, array_1d_float, array_1d_float,
array_1d_float, c_uint, c_double, c_double, c_uint, c_int, array_1d_float]
#call the library
liblombscargleerrorfloat.LombScarglePy(c_objId, c_timeX, c_magY, c_magDY, c_uint(sizeData), c_double(minFreq), c_double(maxFreq), c_uint(freqToTest), c_int(setmode), ret_pgram)
#with error -- double
if (error==True and dtype=="double"):
#define the argument types
liblombscargleerrordouble.LombScarglePy.argtypes = [array_1d_unsigned, array_1d_double, array_1d_double,
array_1d_double, c_uint, c_double, c_double, c_uint, c_int, array_1d_double]
#call the library
liblombscargleerrordouble.LombScarglePy(c_objId, c_timeX, c_magY, c_magDY, c_uint(sizeData), c_double(minFreq), c_double(maxFreq), c_uint(freqToTest), c_int(setmode), ret_pgram)
#for convenience, reshape the pgrams as a 2-D array
ret_pgram=ret_pgram.reshape([uniqueObjects, freqToTest])
ret_periods=np.zeros(uniqueObjects)
#to compute best periods, work back in regular oscillating frequencies (not angular)
for x in range(0, uniqueObjects):
# ret_periods[x]=1.0/(minFreq+(df*np.argmax(ret_pgram[x])))
ret_periods[x]=1.0/(minFreqStandard+(dfstandard*np.argmax(ret_pgram[x])))
if(uniqueObjects>1):
print("[Python] Sum of all periods: %f" %(np.sum(ret_periods)))
else:
print("[Python] Period for object: %f" %ret_periods[0])
# sys.stdout.flush()
return ret_uniqueObjectIdsOrdered, ret_periods, ret_pgram
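# Hypothetical usage sketch (the column layout, frequency range and the presence of the
# compiled shared libraries in the working directory are assumptions, not from the source):
#
# objId = getColumn('lightcurves.csv', 0)
# timeX = getColumn('lightcurves.csv', 1)
# magY  = getColumn('lightcurves.csv', 2)
# ids, periods, pgrams = lombscargle(objId, timeX, magY, minFreq=0.1, maxFreq=10.0,
#                                    error=False, mode="CPU", verbose=True)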
|
import abc
import traitlets.config
class EventSink(traitlets.config.LoggingConfigurable):
"""
A sink for JupyterLab telemetry events.
    Subclasses should do something useful with the events that are received
    (such as sending them to an event store or writing them to a file).
"""
@abc.abstractmethod
def handle_event(self, event: dict, metadata: dict):
raise NotImplementedError()
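# Minimal sketch of a concrete sink (hypothetical, not part of the source):
#
# class PrintEventSink(EventSink):
#     def handle_event(self, event: dict, metadata: dict):
#         self.log.info("event %s (metadata %s)", event, metadata)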
|
SID = ''
AUTH_TOKEN = ''
FROM_NUMBER =''
TO_NUMBER = ''
API_KEY=''
DEVICE_NAME='BOLT'
|
#!/usr/bin/env python3
from pymongo import MongoClient
####
# Indicate start
####
print("============================")
print(" Starting my_python_app ")
print("============================")
print('\n')
####
# Main start function
####
def main():
# MongoDB Connection
mongo_client = MongoClient(MONGODB_URL)
db = mongo_client[DATABASE]
patients_collection = db[COLLECTION]
# Define the query
# Comment/uncomment one of the following query variables to either enable a filter or return all
# Projection returns a subset of fields
query = { 'year' : {'$gte' : 2015}, 'cast' : 'Chris Pratt' }
#query = {}
projection = {'_id': 0, 'title' : 1, 'cast' : 1}
for doc in patients_collection.find(query, projection):
print(doc)
####
# Constants
####
MONGODB_URL = '### Your MongoDB Atlas URL ###'
DATABASE = 'sample_mflix'
COLLECTION = 'movies'
####
# Main
####
if __name__ == '__main__':
main()
####
# Indicate End
####
print('\n')
print("============================")
print(" Ending my_python_app ")
print("============================")
|
# -*- coding:utf8 -*-
import fixpath
import okpy
G_ERROR_NUMBER = None
def outer_exception():
return 1/0-3
def catcher_exception(n):
return 1/(n-2)
@okpy.ready
def are_you_ready():
global G_ERROR_NUMBER
G_ERROR_NUMBER = 2
@okpy.cleaner
def i_am_cleaner():
global G_ERROR_NUMBER
G_ERROR_NUMBER = None
@okpy.test
def check_it_true():
assert 1 != 2
@okpy.test
def check_it_wrong():
assert (1 - 2) > 0
@okpy.test
def check_it_no_name_but_doc():
"""
this is a __doc__
"""
a = outer_exception()
@okpy.test
def check_it_catch():
assert okpy.catch(catcher_exception, G_ERROR_NUMBER) in (ZeroDivisionError,)
@okpy.benchmark(n=1000, timeout=1000)
def benchmark_is_ok():
n = 0
for x in xrange(100):
n += x
@okpy.benchmark(n=1, timeout=1)
def benchmark_is_fail():
import time
time.sleep(3)
if __name__ == '__main__':
okpy.run()
|
from .quick_logger import *
|
#! /usr/bin/env python3
#
# Copyright (C) 2018 Gaëtan Harter <gaetan.harter@fu-berlin.de>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
#
"""
lazysponge
Adaptation of moreutils `sponge` with the added functionality that it does not
modify the output file if the content would be unchanged.
Description
-----------
Reads standard input and writes it to the specified file if its content was
different.
The file is not changed if the content is the same, so the modification timestamp is
unchanged.
Note
----
It only works with input provided by a `pipe` and not interactive input.
The reason is that `ctrl+c` would not be handled properly in that case.
Usage
-----
usage: lazysponge.py [-h] outfile
Soak up all input from stdin and write it to <outfile> if it differs from
previous content. If the content is the same, file is not modified.
positional arguments:
outfile Output file
optional arguments:
-h, --help show this help message and exit
"""
import os
import sys
import argparse
import hashlib
DESCRIPTION = ('Soak up all input from stdin and write it to <outfile>'
' if it differs from previous content.\n'
' If the content is the same, file is not modified.')
PARSER = argparse.ArgumentParser(description=DESCRIPTION)
PARSER.add_argument('outfile', help='Output file')
PARSER.add_argument('--verbose', '-v', help='Verbose output', default=False,
action='store_true')
def _print_hash_debug_info(outfilename, oldbytes, newbytes):
    """Print debug information on hashes."""
oldhash = hashlib.md5(oldbytes).hexdigest() if oldbytes is not None else ''
newhash = hashlib.md5(newbytes).hexdigest()
if oldbytes == newbytes:
msg = 'Keeping old {} ({})'.format(outfilename, oldhash)
else:
msg = 'Replacing {} ({} != {})'.format(outfilename, oldhash, newhash)
print(msg, file=sys.stderr)
def main():
"""Write stdin to given <outfile> if it would change its content."""
opts = PARSER.parse_args()
# No support for 'interactive' input as catching Ctrl+c breaks in 'read'
if os.isatty(sys.stdin.fileno()):
print('Interactive input not supported. Use piped input',
file=sys.stderr)
print(' echo message | {}'.format(' '.join(sys.argv)),
file=sys.stderr)
exit(1)
try:
with open(opts.outfile, 'rb') as outfd:
oldbytes = outfd.read()
except FileNotFoundError:
oldbytes = None
stdinbytes = sys.stdin.buffer.read()
if opts.verbose:
_print_hash_debug_info(opts.outfile, oldbytes, stdinbytes)
if oldbytes == stdinbytes:
return
with open(opts.outfile, 'wb') as outfd:
outfd.write(stdinbytes)
if __name__ == '__main__':
main()
|
from enum import IntEnum
from blatann.nrf.nrf_types import *
from blatann.nrf.nrf_dll_load import driver
import blatann.nrf.nrf_driver_types as util
class BLEEvent(object):
evt_id = None
def __init__(self, conn_handle):
self.conn_handle = conn_handle
def __str__(self):
return self.__repr__()
class EvtUserMemoryRequest(BLEEvent):
evt_id = driver.BLE_EVT_USER_MEM_REQUEST
def __init__(self, conn_handle, request_type):
super(EvtUserMemoryRequest, self).__init__(conn_handle)
self.type = request_type
@classmethod
def from_c(cls, event):
return cls(event.evt.common_evt.conn_handle, event.evt.common_evt.params.user_mem_request.type)
def __repr__(self):
return "{}(conn_handle={!r}, type={!r})".format(self.__class__.__name__, self.conn_handle, self.type)
class EvtTxComplete(BLEEvent):
evt_id = driver.BLE_EVT_TX_COMPLETE
def __init__(self, conn_handle, count):
super(EvtTxComplete, self).__init__(conn_handle)
self.count = count
@classmethod
def from_c(cls, event):
tx_complete_evt = event.evt.common_evt.params.tx_complete
return cls(conn_handle=event.evt.common_evt.conn_handle,
count=tx_complete_evt.count)
def __repr__(self):
return "{}(conn_handle={!r}, count={!r})".format(self.__class__.__name__, self.conn_handle, self.count)
|
# DO NOT EDIT THIS FILE. This file will be overwritten when re-running go-raml.
import os
from path_if import path_if
from sanic import Sanic
from sanic.response import json
dir_path = os.path.dirname(os.path.realpath(__file__))
app = Sanic(__name__)
app.blueprint(path_if)
if __name__ == "__main__":
app.run(debug=True, port=5000, workers=2)
|
"""SMTP Email connector to gmail.
Sends email to organization member via gmail SMTP using
config.py credentials
"""
import json
import requests
from requests.auth import HTTPBasicAuth
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
import config
def getEmailAddress(url):
member_response = requests.get(url, auth=HTTPBasicAuth(config.username, config.password))
member_data = json.loads(member_response.text)
return member_data["email"]
def send(profile):
fromaddr = config.gmailuser
toaddr = getEmailAddress(profile)
willSend = str(raw_input("Send email to " + toaddr + "? (Y/N)" ))
if willSend.lower() != "y":
print("Skipping email to " + toaddr)
return
else:
#userprofile = "https://github.com/" + username
username = profile.rsplit('/', 1)[-1]
profilelink = "https://github.com/account"
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = "Profile update request"
body = "Noticed you don't have a profile name on your Github account. Here's a link to your github profile " + profilelink + ". I'd recommend going to Github through your browser - it's safer that way. Links in emails are not secure."
msg.attach(MIMEText(body, 'plain'))
server = smtplib.SMTP('smtp.gmail.com:587')
server.ehlo()
server.starttls()
try:
server.login(fromaddr, config.gmailpass)
except smtplib.SMTPAuthenticationError:
print ("\nPlease verify gmail credentials in config.py\n"
"verify less secure apps is enabled and\n"
"Captcha is disabled for gmail SMTP provider\n"
"see readme for more details\n")
raise SystemExit
text = msg.as_string()
server.sendmail(fromaddr, toaddr, text)
server.quit()
print("Email sent to " + toaddr)
return
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import sys
import math
ayuda = f'''
NAME
    {sys.argv[0]}
    Python application to compute the PLL settings for the MCUs:
    dsPIC33EP and PIC24EP
SYNOPSIS
    {sys.argv[0]} [-h|--help]
    {sys.argv[0]} [[-fi] <input_freq>[k|K|M]] [[-fo] <output_freq>[k|K|M]]
    At least one input argument is required to indicate the desired output
    frequency. The frequency must be given as a number expressing Hz, kHz or
    MHz by appending the corresponding suffix.
    For example, the following calls are equivalent:
    {sys.argv[0]} 22118400
    {sys.argv[0]} 22118.4K
    {sys.argv[0]} 22.1184M
    By default the input frequency is taken from the INTRC oscillator = 7.37 MHz.
    The input frequency may also be specified, and since the valid ranges of
    the two frequencies do not overlap, both can simply be given in any order.
    For example:
    {sys.argv[0]} 8M 100M
    {sys.argv[0]} 100M 8M
    In both cases the input frequency is 8 MHz and the output frequency 100 MHz.
    You can also be more explicit and assign the input and output frequencies
    with the parameters -fo, -fi, --fout, --fin. For example:
    {sys.argv[0]} -fi 8M -fo 100M
PARAMETERS:
    -h, --help: show this help
    -fo, --fout: precedes the parameter specifying the output frequency
    -fi, --fin: precedes the parameter specifying the input frequency
'''
def getHz(token):
    '''
    Convert a string holding a numeric value, optionally followed by a suffix
    expressing Hz, kHz or MHz, into an integer number of Hz.
    Raises an exception if the conversion fails.
    @args
        token   String of the form n[0-9][k|K|M]
    '''
if token[-1] == 'k' or token[-1] == 'K':
return round(abs(float(token[:-1])) * 1000)
elif token[-1] == 'M':
return round(abs(float(token[:-1])) * 1000000)
else:
return round(abs(float(token)))
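# Example: getHz('22118.4K') == getHz('22.1184M') == 22118400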
def pll(fi, prescaler, postscaler, divider):
return fi * divider / prescaler / postscaler
def desviacion(objetivo, efectivo):
return abs((efectivo - objetivo) / objetivo)
# ------------------------------------------------------------------------------
# Computes the PLL configuration parameters: pll_prescaler,
# pll_postscaler and pll_feedback_divisor.
# The calculation tries to obtain the best approximation to the requested
# output frequency.
# In some cases there is more than one solution; in those cases the solution
# whose fplli and fvco operating points are best centred within their working
# ranges is returned.
# @arguments:
#   fin: Input frequency to the prescaler.
#   fout: Requested output frequency.
# @return:
#   A tuple of three numbers to be interpreted as:
#   (pll_prescaler, pll_postscaler, pll_feedback_divisor), i.e.
#   (N1, N2, M)
#
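# Example (illustrative): for fin = 8 MHz and fout = 100 MHz, one valid setting
# is N1=2, N2=2, M=50, since 8 MHz * 50 / (2*2) = 100 MHz exactly, with
# fplli = 4 MHz and fvco = 200 MHz inside their allowed ranges.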
def pll_settings(fin, fout):
    # Constraints applied to the calculation.
    fplli_min = 800000 # Minimum PLL input frequency
    fplli_max = 8000000 # Maximum PLL input frequency
    fplli_mid = (fplli_min + fplli_max) / 2
    fvco_min = 120000000 # Minimum VCO output frequency
    fvco_max = 340000000 # Maximum VCO output frequency
    fvco_mid = (fvco_min + fvco_max) / 2
    pllpre_min = 2
    pllpre_max = 33
    ###pllpre_mid = (pllpre_min + pllpre_max)/2
    plldiv_min = 2
    plldiv_max = 513
    ###plldiv_mid = (plldiv_min + plldiv_max)/2
    pllpost_set = [2, 4, 8]
    ###pllpost_mid = 4
    # Before iterating, determine the limits within which the calculation
    # stays inside the physical constraints.
n1_min = math.ceil(fin / fplli_max)
if n1_min < pllpre_min:
n1_min = pllpre_min
n1_max = math.floor(fin / fplli_min)
if n1_max > pllpre_max:
n1_max = pllpre_max
n2_min = math.ceil(fvco_min / fout)
if n2_min < pllpost_set[0]:
n2_min = pllpost_set[0]
n2_max = math.floor(fvco_max / fout)
if n2_max > pllpost_set[-1]:
n2_max = pllpost_set[-1]
    # Working variables used in the calculation iterations.
    pll_n1 = 0
    pll_n2 = 0
    pll_div = 0
    fout_error = fout
    test_fout = 0
for test_post in pllpost_set:
        # Iterate only over the previously bounded values, discarding solutions
        # that do not fit the physical constraints of the MCU.
if test_post < n2_min:
continue
elif test_post > n2_max:
break
for test_pre in range(n1_min, n1_max + 1):
for test_div in range(plldiv_min, plldiv_max + 1):
test_fout = pll(fin, test_pre, test_post, test_div)
test_error = abs(fout - test_fout)
if test_error < fout_error:
fout_error = test_error
pll_n1 = test_pre
pll_n2 = test_post
pll_div = test_div
elif test_error == fout_error:
                    # Deviation of the current solution's operating point from the mid-range point
desv_fplli = desviacion(fplli_mid, fin / pll_n1)
test_fplli = desviacion(fplli_mid, fin / test_pre)
desv_fvco = desviacion(fvco_mid, fout * pll_n2)
test_fvco = desviacion(fvco_mid, fout * test_post)
if (test_fplli * test_fvco) < (desv_fplli * desv_fvco):
fout_error = test_error
pll_n1 = test_pre
pll_n2 = test_post
pll_div = test_div
return pll_n1, pll_n2, pll_div
def main():
    # Limits:
    pllfin_min = 1600000
    pllfin_max = 8000000
    pllfout_min = 15000000
    pllfout_max = 140000000
    # Input variables for the calculations:
    # pll_fin: by default, assumed to be connected to INTRC = 7.37MHz
    pll_fin = int(7.37e6)
    #
    # pll_fout: must be set through a command-line parameter
    pll_fout = 0
    # Execution parameters:
    verbose = 5
    # Argument evaluation:
if len(sys.argv) < 2:
        msg_error = f'''
        ERROR: At least one parameter is required to specify the requested
        output frequency in Hz, kHz or MHz. For example:
            {sys.argv[0]} 100M
        will provide the settings to obtain a 100MHz frequency from the
        internal INTRC oscillator of 7.37MHz.
        Use the -h or --help parameter for more information.\n '''
print(msg_error)
exit()
    # Variable used to assign the right meaning to each token
    # in compound arguments.
    next_token = 'name'
for token in sys.argv[1:]:
if next_token == 'name':
if token == '-h' or token == '--help':
print(ayuda)
exit(-1)
elif token == '-fo' or token == '-fout' or token == '--fout':
next_token = 'fout_val'
elif token == '-fi' or token == '-fin' or token == '--fin':
next_token = 'fin_val'
            # As a last resort, unnamed arguments are accepted and interpreted
            # as an input or output frequency specification depending on
            # their value.
else:
try:
freq = getHz(token)
if freq >= pllfout_min and freq <= pllfout_max:
pll_fout = freq
elif freq >= pllfin_min and freq <= pllfin_max:
pll_fin = freq
else:
                        msg_error = f'''
                        ERROR: The frequency {token} is outside the acceptable range for the
                        input and output frequencies. '''
print(msg_error)
except ValueError:
                    msg_error = f'''
                    ERROR: The argument {token} is not recognised as a valid argument.'''
print(msg_error)
exit(-1)
elif next_token == 'fout_val':
try:
pll_fout = getHz(token)
except ValueError:
                msg_error = f'''
                ERROR: In the value specified for --fout:
                {token} does not have the expected format. '''
print(msg_error)
exit(-1)
next_token = 'name'
elif next_token == 'fin_val':
try:
pll_fin = getHz(token)
except ValueError:
                msg_error = f'''
                ERROR: In the value specified for --fin:
                {token} does not have the expected format. '''
print(msg_error)
exit(-1)
next_token = 'name'
if pll_fout == 0:
        print('ERROR: No valid output frequency was specified')
exit(-1)
if pll_fin < pllfin_min or pll_fin > pllfin_max:
        print(f'Input frequency outside the allowed range: ({pllfin_min/1000000}M, {pllfin_max/1000000}M)')
exit(-1)
if pll_fout < pllfout_min or pll_fout > pllfout_max:
        print(f'Output frequency outside the allowed range: ({pllfout_min/1000000}M, {pllfout_max/1000000}M)')
exit(-1)
if verbose > 0:
        print(f'Computing for: Fin={pll_fin / 1000000}MHz, Fout={pll_fout / 1000000}MHz')
pll_n1, pll_n2, pll_m = pll_settings(pll_fin, pll_fout)
print(f'N1 = {pll_n1}')
print(f'N2 = {pll_n2}')
print(f'M = {pll_m}')
act_fout = pll(pll_fin, pll_n1, pll_n2, pll_m)
    print(f'Obtained frequency: Fout={act_fout / 1000000}MHz')
    print(f'error: {100 * (act_fout - pll_fout) / pll_fout}%')
    print(f'Operating point: fplli = {pll_fin / pll_n1 / 1000000}MHz, fvco = {pll_fout * pll_n2 / 1000000}MHz')
if __name__ == '__main__':
main()
|
from collections import defaultdict
from datetime import datetime, timedelta
from random import randint
from re import search
from time import time
from . import db
from .cmds import games
welcomed = []
messages = defaultdict(int)
def process(bot, user, message):
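    """Handle one chat message: update user records, greet newcomers, say goodbye, reward activity, thank cheers, and advance any pending heist."""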
update_records(bot, user)
if user["id"] not in welcomed:
welcome(bot, user)
elif "bye" in message:
say_goodbye(bot, user)
# if user["id"] != "190089937"
check_activity(bot, user)
if (match := search(r'cheer[0-9]+', message)) is not None:
thank_for_cheer(bot, user, match)
if (h := games.heist) is not None:
if h.start_time <= time() and not h.running:
games.run_heist(bot)
elif h.end_time <= time() and h.running:
games.end_heist(bot)
def add_user(bot, user):
db.execute("INSERT OR IGNORE INTO users (UserID, UserName) VALUES (?, ?)",
user["id"], user["name"].lower())
def update_records(bot, user):
db.execute("UPDATE users SET UserName = ?, MessagesSent = MessagesSent + 1 WHERE UserID = ?",
user["name"].lower(), user["id"])
stamp = db.field("SELECT CoinLock FROM users WHERE UserID = ?",
user["id"])
if datetime.strptime(stamp, "%Y-%m-%d %H:%M:%S") < datetime.utcnow():
coinlock = (datetime.utcnow()+timedelta(seconds=60)).strftime("%Y-%m-%d %H:%M:%S")
db.execute("UPDATE users SET Coins = Coins + ?, CoinLock = ? WHERE UserID = ?",
randint(1, 5), coinlock, user["id"])
def welcome(bot, user):
bot.send_message(f"Welcome to the stream {user['name']}!")
welcomed.append(user["id"])
def say_goodbye(bot, user):
bot.send_message(f"See ya later {user['name']}!")
welcomed.remove(user["id"])
def check_activity(bot, user):
messages[user["id"]] += 1
if (count := messages[user["id"]]) % 3 == 0:
bot.send_message(f"Thanks for being active in chat {user['name']} - you've sent {count:,} messages! Keep it up!")
def thank_for_cheer(bot, user, match):
bot.send_message(f"Thanks for the {match.group[5:]:,} bits {user['name']}! That's really appreciated!")
|
import sys
import os
import berserk
import chess
from pathlib import Path
from lichs.Game import Game
from lichs.api_key import set_api
def main():
try:
set_api(sys.argv[1])
os._exit(0)
except:
try:
token_file = Path(__file__).parent.absolute() / "key"
token = token_file.read_text()
session = berserk.TokenSession(token)
client = berserk.clients.Client(session)
board = berserk.clients.Board(session)
except:
print("The API token is either empty or wrong. Please run the command 'lichess' and input your API token as a second argument, i.e 'lichs <api_token>'. If you need more help, please see the instructions in the Github README: \nhttps://github.com/Cqsi/lichs#how-to-generate-a-personal-api-token")
os._exit(0)
# Gets your account data, e.g ["id"], ["username"]
account_data = client.account.get()
player_id = account_data["id"]
# Welcome text
print("Welcome to Lichess!\n")
print("What kind of chess do you want to play?")
print("1. Rapid (10+0)\n2. Classical (30+0)\n")
num = input("Enter 1 or 2: ")
time = 0
if num=="1":
time=10
elif num=="2":
time=30
else:
# This needs improvement, something like a while/for loop
print("Something went wrong, please enter the lichess command again.")
sys.exit()
print("Searching after opponent...")
board.seek(time, 0)
for event in board.stream_incoming_events():
if event['type'] == 'gameStart':
print("An opponent was found!")
isWhite = True
color = "Black" # We set the color to the opposite color of the player
if player_id != client.games.export(event['game']['id'])['players']['white']['user']['id']:
isWhite = False
color = "White"
print("You're playing as black!")
print("White's turn...")
else:
print("You're playing as white!")
game = Game(board, event['game']['id'], player_id, isWhite, color)
game.start()
|
# https://www.acmicpc.net/problem/9498
a = int(input())
if (a >= 90) and (a <= 100):
print("A")
elif (a >= 80) and (a < 90):
print("B")
elif (a >= 70) and (a < 80):
print("C")
elif (a >= 60) and (a < 70):
print("D")
else:
print("F")
|
"""Main functions to retrieve the relevant data of the article corresponding
to the given arXiv identifier. Also helper function to check if arXiv
identifier exists.
"""
import re
import requests
from lxml import html
import src.article as article
def get_year(ax_id):
"""Extract the year from an arXiv identifier (in format YYYY)."""
modern_ax_id = re.compile(r"([0-9]{2})([0-9]{2})\.([0-9]+)")
search_modern = re.search(modern_ax_id, ax_id)
if search_modern:
year = "20" + search_modern[1]
else:
old_ax_id = re.compile(r"([a-zA-Z]+[-]?[a-zA-Z]+)/([0-9]{2})([0-9]+)")
search_old = re.search(old_ax_id, ax_id)
# get century right
if search_old[2][0] == "9":
year = "19" + search_old[2]
else:
year = "20" + search_old[2]
return year
def arxiv(ax_id):
"""Ask for arXiv identifier and return corresponding Article class
or None if arXiv identifier does not exist.
"""
# python 3 truncates leading zeros but these might occur
# in arxiv identifiers. TODO: check!
if not check(ax_id):
print("Not a correct arXiv identifier. Please try again.")
return None
ax_id = str(ax_id).zfill(9)
article_year = get_year(ax_id)
abs_url = "https://arxiv.org/abs/{}".format(ax_id)
src_abs = requests.get(abs_url)
# obtain a _structured_ document ("tree") of source of abs_url
page_tree = html.fromstring(src_abs.content)
# extract title and abstract from page tree
title_xpath = page_tree.xpath('//meta[@name="citation_title"]/@content')
title = " ".join(title_xpath)
abstract = " ".join(
page_tree.xpath('//meta[@property="og:description"]' + "/@content")
)
# get main subject from page tree
subject_xpath = page_tree.xpath('//span [@class="primary-subject"]')
main_subject = subject_xpath[0].text_content()
    # first get all authors (format compatible with bibtex)
all_authors = page_tree.xpath('//meta[@name="citation_author"]/@content')
if len(all_authors) > 1:
authors_name = " and ".join(all_authors)
else:
authors_name = all_authors[0]
# second create a short and 'contracted' authors' name, e.g.
# to create file name or bibtex key
authors_short_list = [a.split(", ")[0] for a in all_authors[:3]]
if len(all_authors) > 3:
authors_short = authors_short_list[0] + " et al"
authors_contracted = authors_short_list[0] + "EtAl"
elif 1 < len(all_authors) <= 3:
authors_short = ", ".join(authors_short_list[:-1])
authors_short += " and " + authors_short_list[-1]
authors_contracted = "".join(authors_short_list)
else:
authors_short = authors_short_list[0] # TODO: IMPROVE!?!?
authors_contracted = authors_short
return article.Article(
title=title,
authors=authors_name,
authors_short=authors_short,
authors_contracted=authors_contracted,
abstract=abstract,
ax_id=ax_id,
year=article_year,
main_subject=main_subject,
)
def check(ax_id):
""""Helper function to check if arXiv identifier exists."""
abs_url = "https://arxiv.org/abs/{}".format(ax_id)
req = requests.get(abs_url)
# check status of request
return req.status_code == requests.codes.ok
|
"""
4차원 데이터를 2차원으로 변환한 후에 max pooling 구현
"""
import numpy as np
from common.util import im2col
if __name__ == '__main__':
np.random.seed(116)
    # Generate 1 random fake image with (c, h, w) = (3, 4, 4) -> shape (1, 3, 4, 4)
x = np.random.randint(10, size=(1, 3, 4, 4))
print(x, 'shape:', x.shape)
    # Convert the 4-d data to a 2-d ndarray
col = im2col(x, filter_h=2, filter_w=2, stride=2, pad=0)
print(col, 'shape:', col.shape) # 4*12
    # max pooling: find the maximum value per channel
    # Reshape the 2-d array so the per-channel maxima are easy to find
col = col.reshape(-1, 2 * 2) # (-1, fh*fw)
print(col, 'shape:', col.shape)
    # Find the maximum value in each row.
out = np.max(col, axis=1)
print(out, 'shape:', out.shape)
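    # out has oh*ow*c = 2*2*3 = 12 values, one per pooling window and channel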
    # Convert the 1-d pooling result back to 4-d: (n, oh, ow, c) → (n, c, oh, ow)
out = out.reshape(1, 2, 2, 3)
print(out)
out = out.transpose(0, 3, 1, 2)
|
import slam.plot as splt
import slam.io as sio
import slam.remeshing as srem
if __name__ == '__main__':
# source object files
source_mesh_file = 'data/example_mesh.gii'
source_texture_file = 'data/example_texture.gii'
source_spherical_mesh_file = 'data/example_mesh_spherical.gii'
# target object files
target_mesh_file = 'data/example_mesh_2.gii'
target_spherical_mesh_file = 'data/example_mesh_2_spherical.gii'
source_mesh = sio.load_mesh(source_mesh_file)
source_tex = sio.load_texture(source_texture_file)
source_spherical_mesh = sio.load_mesh(source_spherical_mesh_file)
target_mesh = sio.load_mesh(target_mesh_file)
target_spherical_mesh = sio.load_mesh(target_spherical_mesh_file)
interpolated_tex_values = \
srem.spherical_interpolation_nearest_neigbhor(source_spherical_mesh,
target_spherical_mesh,
source_tex.darray[0])
# plot
visb_sc = splt.visbrain_plot(mesh=source_mesh, tex=source_tex.darray[0],
caption='source with curvature',
cblabel='curvature')
visb_sc = splt.visbrain_plot(mesh=source_spherical_mesh,
tex=source_tex.darray[0],
caption='spherical source mesh',
cblabel='curvature', visb_sc=visb_sc)
visb_sc = splt.visbrain_plot(mesh=target_mesh, tex=interpolated_tex_values,
caption='target mesh with curvature '
'from source mesh',
cblabel='curvature', visb_sc=visb_sc)
visb_sc.preview()
|
'''
Created on Jan 4, 2019
@author: bergr
'''
import numpy as np
from suitcase.nxstxm.stxm_types import scan_types, two_posner_scans
from suitcase.nxstxm.device_names import *
#from suitcase.nxstxm.utils import dct_get, dct_put
from suitcase.nxstxm.roi_dict_defs import *
# from suitcase.nxstxm.nxstxm_utils import (make_signal, _dataset, _string_attr, _group, make_1d_array, \
# get_nx_standard_epu_mode, get_nx_standard_epu_harmonic_new, translate_pol_id_to_stokes_vector, \
# readin_base_classes, make_NXclass, remove_unused_NXsensor_fields)
from suitcase.nxstxm.nxstxm_utils import _dataset, _string_attr, make_1d_array
import suitcase.nxstxm.nx_key_defs as nxkd
MARK_DATA = False
# parent.modify_single_image_ctrl_str_attrs(cntrl_nxgrp, doc)
# parent.modify_single_image_ctrl_data_grps(cntrl_nxgrp, doc)
def modify_single_image_ctrl_data_grps(parent, nxgrp, doc, scan_type):
'''
:param nxgrp:
:param doc:
:return:
'''
resize_data = False
rois = parent.get_rois_from_current_md(doc['run_start'])
x_src = parent.get_devname(rois[SPDB_X][POSITIONER])
x_posnr_nm = parent.fix_posner_nm(rois[SPDB_X][POSITIONER])
# x_posnr_src = rois[SPDB_X]['SRC']
y_src = parent.get_devname(rois[SPDB_Y][POSITIONER])
y_posnr_nm = parent.fix_posner_nm(rois[SPDB_Y][POSITIONER])
# y_posnr_src = rois[SPDB_Y]['SRC']
xnpoints = int(rois[SPDB_X][NPOINTS])
ynpoints = int(rois[SPDB_Y][NPOINTS])
ttlpnts = xnpoints * ynpoints
# uid = list(parent._cur_scan_md.keys())[0]
# primary_det_nm = parent.get_primary_det_nm(uid)
# prim_data_lst = parent._data['primary'][primary_det_nm]['data']
# if (len(prim_data_lst) < ttlpnts):
resize_data = True
# scan was aborted so use setpoint data here
xdata = np.array(rois[SPDB_X][SETPOINTS], dtype=np.float32)
ydata = np.array(rois[SPDB_Y][SETPOINTS], dtype=np.float32)
# else:
# # use actual data
# # xdata is teh first xnpoints
# xdata = np.array(parent._data['primary'][x_src]['data'][0:xnpoints], dtype=np.float32)
# # ydata is every ynpoint
# ydata = np.array(parent._data['primary'][y_src]['data'][0::ynpoints], dtype=np.float32)
_dataset(nxgrp, y_posnr_nm, ydata, 'NX_FLOAT')
_dataset(nxgrp, x_posnr_nm, xdata, 'NX_FLOAT')
# this should be an array the same shape as the 'data' group in NXdata filled with the storagering current
#_sr_data = parent._data['baseline'][uid][parent.get_devname(DNM_RING_CURRENT) + '_val']['data']
_sr_data = parent.get_baseline_all_data(parent.get_devname(DNM_RING_CURRENT) + '_val')
sr_data = np.linspace(_sr_data[0], _sr_data[1], ttlpnts)
#if (resize_data):
# sr_data = np.resize(sr_data, (ttlpnts,))
_dataset(nxgrp, 'data', np.reshape(sr_data, (ynpoints, xnpoints)), 'NX_NUMBER')
modify_single_image_ctrl_str_attrs(parent, nxgrp, doc)
def modify_single_image_ctrl_str_attrs(parent, nxgrp, doc):
'''
:param nxgrp:
:param doc:
:return:
'''
rois = parent.get_rois_from_current_md(doc['run_start'])
x_posnr_nm = parent.fix_posner_nm(rois[SPDB_X][POSITIONER])
y_posnr_nm = parent.fix_posner_nm(rois[SPDB_Y][POSITIONER])
_string_attr(nxgrp, 'axes', [y_posnr_nm, x_posnr_nm])
def modify_single_image_nxdata_group(parent, data_nxgrp, doc, scan_type):
'''
:param entry_nxgrp:
:param cntr_nm:
:param doc:
:param scan_type:
:return:
'''
resize_data = False
rois = parent.get_rois_from_current_md(doc['run_start'])
x_src = parent.get_devname(rois[SPDB_X][POSITIONER])
x_posnr_nm = parent.fix_posner_nm(rois[SPDB_X][POSITIONER])
# x_posnr_src = rois[SPDB_X]['SRC']
y_src = parent.get_devname(rois[SPDB_Y][POSITIONER])
y_posnr_nm = parent.fix_posner_nm(rois[SPDB_Y][POSITIONER])
# y_posnr_src = rois[SPDB_Y]['SRC']
xnpoints = rois[SPDB_X][NPOINTS]
ynpoints = rois[SPDB_Y][NPOINTS]
ttlpnts = xnpoints * ynpoints
#prim_data_lst = parent._data['primary'][x_src]['data']
#uid = list(parent._cur_scan_md.keys())[0]
uid = parent.get_current_uid()
primary_det_nm = parent.get_primary_det_nm(uid)
prim_data_arr = np.array(parent._data['primary'][primary_det_nm][uid]['data'])
if(scan_types(scan_type) is scan_types.PATTERN_GEN_SCAN):
rows, cols = ynpoints, xnpoints
else:
rows, cols = prim_data_arr.shape
#rows, cols = prim_data_arr.shape
if ((rows * cols) < ttlpnts):
        # scan had been aborted
resize_data = True
# scan was aborted so use setpoint data here
xdata = np.array(rois[SPDB_X][SETPOINTS], dtype=np.float32)
ydata = np.array(rois[SPDB_Y][SETPOINTS], dtype=np.float32)
else:
if(x_src not in parent._data['primary'].keys()):
xdata = np.array(rois[SPDB_X][SETPOINTS], dtype=np.float32)
ydata = np.array(rois[SPDB_Y][SETPOINTS], dtype=np.float32)
else:
# use actual data
            # xdata is the first xnpoints
xdata = np.array(parent._data['primary'][x_src][uid]['data'][0:xnpoints], dtype=np.float32)
# ydata is every ynpoint
ydata = np.array(parent._data['primary'][y_src][uid]['data'][0::ynpoints], dtype=np.float32)
_dataset(data_nxgrp, y_posnr_nm, ydata, 'NX_FLOAT')
_dataset(data_nxgrp, x_posnr_nm, xdata, 'NX_FLOAT')
_string_attr(data_nxgrp, 'axes', [y_posnr_nm, x_posnr_nm])
_string_attr(data_nxgrp, 'signal', 'data')
det_nm = parent.get_primary_det_nm(doc['run_start'])
# three_d_scans = [scan_types.DETECTOR_IMAGE, scan_types.OSA_IMAGE, scan_types.OSA_FOCUS, scan_types.SAMPLE_FOCUS, scan_types.SAMPLE_IMAGE_STACK, \
# scan_types.COARSE_IMAGE_SCAN, scan_types.COARSE_GONI_SCAN, scan_types.TOMOGRAPHY_SCAN]
three_d_scans = [scan_types.DETECTOR_IMAGE, scan_types.OSA_IMAGE, scan_types.OSA_FOCUS, scan_types.SAMPLE_FOCUS, \
scan_types.COARSE_IMAGE_SCAN, scan_types.COARSE_GONI_SCAN, scan_types.TOMOGRAPHY_SCAN]
if(scan_types(scan_type) in three_d_scans):
# det_data = np.array(parent._data['primary'][det_nm]['data'], dtype=np.float32).reshape((1, ynpoints, xnpoints))
det_data = np.array(parent._data['primary'][det_nm][uid]['data'], dtype=np.float32)
if (resize_data):
det_data = parent.fix_aborted_data(det_data, ttlpnts)
det_data = np.reshape(det_data, (1, ynpoints, xnpoints))
if(MARK_DATA):
# put a black box in corner
c = int(xnpoints / 3)
r = int(xnpoints / 3)
for n in range(r):
det_data[0, n, 0:c] = 0
else:
# det_data = np.array(parent._data['primary'][det_nm]['data'], dtype=np.float32).reshape((ynpoints, xnpoints))
det_data = np.array(parent._data['primary'][det_nm][uid]['data'], dtype=np.float32)
_dataset(data_nxgrp, 'data', det_data, 'NX_NUMBER')
def modify_single_image_instrument_group(parent, inst_nxgrp, doc, scan_type):
'''
:param nxgrp:
:param doc:
:param scan_type:
:return:
'''
rois = parent.get_rois_from_current_md(doc['run_start'])
dwell = parent._cur_scan_md[doc['run_start']]['dwell'] * 0.001
det_nm = parent.get_primary_det_nm(doc['run_start'])
scan_type = parent.get_stxm_scan_type(doc['run_start'])
ttl_pnts = rois[SPDB_X][NPOINTS] * rois[SPDB_Y][NPOINTS]
uid = parent.get_current_uid()
det_data = np.array(parent._data['primary'][det_nm][uid]['data']) # .reshape((ynpoints, xnpoints))
parent.make_detector(inst_nxgrp, parent._primary_det_prefix, det_data, dwell, ttl_pnts, units='counts')
sample_x_data = make_1d_array(ttl_pnts, parent.get_sample_x_data('start'))
sample_y_data = make_1d_array(ttl_pnts, parent.get_sample_y_data('start'))
parent.make_detector(inst_nxgrp, nxkd.SAMPLE_X, sample_x_data, dwell, ttl_pnts, units='um')
parent.make_detector(inst_nxgrp, nxkd.SAMPLE_Y, sample_y_data, dwell, ttl_pnts, units='um')
if (scan_type in two_posner_scans):
xnpoints = rois[SPDB_X][NPOINTS]
ynpoints = rois[SPDB_Y][NPOINTS]
ttl_pnts = rois[SPDB_X][NPOINTS] * rois[SPDB_Y][NPOINTS]
x_src = parent.get_devname(rois[SPDB_X][POSITIONER])
x_posnr_nm = parent.fix_posner_nm(rois[SPDB_X][POSITIONER])
y_src = parent.get_devname(rois[SPDB_Y][POSITIONER])
y_posnr_nm = parent.fix_posner_nm(rois[SPDB_Y][POSITIONER])
        # xdata is the first xnpoints
if(x_src not in parent._data['primary'].keys()):
xdata = np.array(rois[SPDB_X][SETPOINTS], dtype=np.float32)
ydata = np.array(rois[SPDB_Y][SETPOINTS], dtype=np.float32)
else:
xdata = parent._data['primary'][x_src]['data'][0:xnpoints]
# ydata is every ynpoint
ydata = parent._data['primary'][y_src]['data'][0::ynpoints]
parent.make_detector(inst_nxgrp, y_posnr_nm, np.tile(ydata, ynpoints), dwell, ttl_pnts, units='um')
parent.make_detector(inst_nxgrp, x_posnr_nm, np.tile(xdata, xnpoints), dwell, ttl_pnts, units='um')
|
#!/usr/bin/env python3
import shutil, os, argparse, sys, stat
sys.path.append("scripts/pyUtils")
sys.path.append("scripts/setUpScripts")
from utils import Utils
from genFuncs import genHelper
def main():
name = "TwoBit"
libs = "njhcpp:v2.6.3"
args = genHelper.parseNjhConfigureArgs()
cmd = genHelper.mkConfigCmd(name, libs, sys.argv)
Utils.run(cmd)
main()
|
import snap

def distanceCentrality(network):
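    """Compute a closeness-style centrality (farness sum) for every node and write the sorted results to distanceCentrality.txt."""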
n = network.GetNodes()
Components = snap.TCnComV() # holds the connected components in a network
snap.GetSccs(network, Components)
nodeCloseness = dict() # dictionary {node: degree}
for NI in network.Nodes():
NId = NI.GetId()
farness = snap.GetFarnessCentr( network, NId, True, True) # this call gives us the farness (un-normalized) sum to the nodes in this node's connected component
if farness == 0: # this node has an out-degree of 0 and can't reach other nodes
nodeCloseness[ NI.GetId() ] = n-1 # normalized avg distance to other nodes
continue
disconnected_nodes = 0 # how many nodes are disconnected from current node
for CnCom in Components:
if NId in CnCom: # we found the CnCom
disconnected_nodes = n - CnCom.Len() # subtract away the size of our connected component
break
nodeCloseness[ NI.GetId() ] = farness
#+ disconnected_nodes * n
sorted_nodeCloseness = sorted( ((value, key) for (key,value) in nodeCloseness.items()), reverse = False) # sort, ascending
with open("distanceCentrality.txt", "w") as f3:
for pair in sorted_nodeCloseness: # (value, node) format
f3.write( str(pair[1]) + "\t" + str(pair[0]) + "\n" )
|
import os
import numpy
import gym
from gym import spaces
try:
import gym_minigrid
from gym_minigrid.wrappers import *
except:
pass
def make_env(env_id, seed, rank, log_dir):
def _thunk():
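        # Returning a thunk lets vectorized-env helpers create each env lazily in its own worker.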
env = gym.make(env_id)
env.seed(seed + rank)
# Maxime: until RL code supports dict observations, squash observations into a flat vector
if isinstance(env.observation_space, spaces.Dict):
env = FlatObsWrapper(env)
return env
return _thunk
|
#!/usr/bin/python
from nn_f0nDuration import ANN
import pandas as pd
import numpy as np
from sklearn.datasets import load_boston
flag_arctic_duration = 0
flag_boston_prices = 1
if flag_boston_prices == 1:
boston = load_boston()
    print('Loaded Data successfully')
X = boston.data
Y = boston.target
from sklearn import preprocessing
xscaler = preprocessing.StandardScaler().fit(X)
X_mean = xscaler.mean_
X_std = xscaler.scale_
X = xscaler.transform(X)
min_max_scaler = preprocessing.MinMaxScaler()
Y_train_minmax=min_max_scaler.fit_transform(Y)
Y = Y_train_minmax + 0.00001
    print('Training Data normalized')
np.savetxt('../data/boston/boston.data',X, fmt='%1.3f')
np.savetxt('../data/boston/boston.target',Y,fmt='%1.3f')
NN = ANN()
NN.ANN('../data/boston/boston.data', '../data/boston/boston.target')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os, argparse, time
from util import util
# command-line options handling
parser = argparse.ArgumentParser (description='MQTT metrics gateway to InfluxDB')
parser.add_argument ('--broker', '-b', default='127.0.0.1', help='MQTT broker address - default to localhost')
parser.add_argument ('--port', '-p', default=1883, type=int, help='MQTT broker port - default to 1883')
parser.add_argument ('--id', '-i', default='monitor', help='Id of MQTT client - default to monitor')
parser.add_argument ('--user', '-u', default='monitor', help='username to connect to MQTT broker')
parser.add_argument ('--debug', '-d', action='store_true', help='Enable MQTT tracing')
parser.add_argument ('--quiet', '-q', action='store_true', help='Disable console tracing')
args = parser.parse_args ()
# logging initialisation
logger = util.init_log ('mqttmonitor', args.quiet)
# import third-party libraries
try:
import requests
import paho.mqtt.client as mqtt
except ImportError as e:
logger.error (e)
exit (1)
# set up MQTT Mosquitto callbacks ('userdata' is the logger)
def on_connect (client, userdata, flags, rc):
    if rc != 0:
        if rc == 1:
            userdata.error ('MQTT connection Error: incorrect protocol')
        elif rc == 2:
            userdata.error ('MQTT connection Error: invalid client identifier')
        elif rc == 3:
            userdata.critical ('MQTT connection Error: server unavailable')
        elif rc == 4:
            userdata.error ('MQTT connection Error: bad username or password')
        elif rc == 5:
            userdata.error ('MQTT connection Error: not authorized')
        else:
            userdata.critical ('MQTT connection error: {code}'.format(code=str(rc)))
        exit (1)
    userdata.info ('MQTT connection success')
def on_subscribe (client, userdata, mid, granted_qos):
userdata.info ('subscribe: {q} - {m}'.format (q=str(granted_qos[0]), m=str(mid)))
def on_message (client, userdata, msg):
userdata.info ('msg: {t}:{p}'.format (t=msg.topic, p=str(msg.payload)))
def on_bytes (client, userdata, msg):
userdata.info ('bytes: {t}:{p}'.format (t=msg.topic, p=str(msg.payload)))
if msg.topic.find ('sent') >= 0:
data = 'bytes,direction=sent value={0}'.format (msg.payload)
elif msg.topic.find ('received') >= 0:
data = 'bytes,direction=received value={0}'.format (msg.payload)
else:
userdata.error ('unexpected topic: {t}'.format (t=msg.topic))
data = None
if data is not None:
params = {'db': 'mqtt'}
r = requests.post ('http://127.0.0.1:8086/write', params=params, data=data)
if r.status_code != 204:
userdata.warning ('influxdb: error {0} - {1}'.format (str(r.status_code), r.text))
def on_publish (client, userdata, msg):
    userdata.info ('publish: {t}:{p}'.format (t=msg.topic, p=str(msg.payload)))
if msg.topic.find ('dropped') >= 0:
data = 'publish,direction=dropped value={0}'.format (msg.payload)
elif msg.topic.find ('received') >= 0:
data = 'publish,direction=received value={0}'.format (msg.payload)
elif msg.topic.find ('sent') >= 0:
data = 'publish,direction=sent value={0}'.format (msg.payload)
else:
userdata.error ('unexpected topic: {t}'.format (t=msg.topic))
data = None
if data is not None:
params = {'db': 'mqtt'}
r = requests.post ('http://127.0.0.1:8086/write', params=params, data=data)
if r.status_code != 204:
userdata.warning ('influxdb: error {0} - {1}'.format (str(r.status_code), r.text))
def on_msg (client, userdata, msg):
userdata.info ('message: {t}:{p}'.format (t=msg.topic, p=str(msg.payload)))
if msg.topic.find ('sent') >= 0:
data = 'messages,direction=sent value={0}'.format (msg.payload)
elif msg.topic.find ('received') >= 0:
data = 'messages,direction=received value={0}'.format (msg.payload)
else:
userdata.error ('unexpected topic: {t}'.format (t=msg.topic))
data = None
if data is not None:
params = {'db': 'mqtt'}
r = requests.post ('http://127.0.0.1:8086/write', params=params, data=data)
if r.status_code != 204:
userdata.warning ('influxdb: error {0} - {1}'.format (str(r.status_code), r.text))
def on_disconnect (client, userdata, rc):
    userdata.info ('disconnected: {code}'.format (code=str(rc)))
def on_log (client, userdata, level, buf):
userdata.info ('MQTT log: {l}-{b}'.format (l=level, b=buf))
# get the password for the MQTT broker connection
try:
mqtt_passwd = os.environ['MQTT_PASSWD']
except KeyError as e:
logger.error ('Please set MQTT_PASSWD environment variable')
exit (1)
# create the MQTT client and set up the 'will'
client = mqtt.Client (client_id=args.id, clean_session=True, userdata=logger)
client.will_set (topic='monitor/monitor', payload='Aborting', qos=0, retain=True)
client.username_pw_set (username=args.user, password=mqtt_passwd)
# register the callbacks
client.on_connect = on_connect
client.on_subscribe = on_subscribe
client.on_message = on_message
client.on_disconnect = on_disconnect
if args.debug is True:
client.on_log = on_log
# create the influxdb database
params = {'q': 'CREATE DATABASE mqtt WITH DURATION 2d NAME mqttmon'}
r = requests.post ('http://127.0.0.1:8086/query', params=params)
# the /query endpoint answers 200 on success (unlike /write, which answers 204)
if r.status_code != 200:
    logger.error (r.text)
# connect to the MQTT broker
try:
client.connect (host=args.broker, port=args.port, keepalive=60)
except IOError as e:
logger.critical (e)
exit (1)
# subscribe to the broker metrics
client.subscribe (topic='$SYS/broker/load/bytes/+/1min')
client.subscribe (topic='$SYS/broker/load/messages/+/1min')
client.subscribe (topic='$SYS/broker/load/publish/+/1min')
client.message_callback_add (sub='$SYS/broker/load/bytes/+/1min', callback=on_bytes)
client.message_callback_add (sub='$SYS/broker/load/messages/+/1min', callback=on_msg)
client.message_callback_add (sub='$SYS/broker/load/publish/+/1min', callback=on_publish)
logger.info ('Starting program')
# event loop
try:
client.loop_forever ()
except KeyboardInterrupt as e:
logger.info (e)
client.disconnect ()
logger.info ('Ending Program')
|
# Generated file.
|
import json
import asyncio
import discord
import os
import config
import requests
import aiohttp
# kgs room id
ROOM=788560
# kgs bot account name
kgs_bot = "LooselyBot"
# kgs json api to read data
kgs_url = "http://www.gokgs.com/json/access"
# bold wrapper
def bold(fn):
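    """Decorator that wraps the decorated function's return value in Discord bold markdown."""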
def wrapper(user):
# insert some bold before and after text
return "**" + fn(user) + "**"
return wrapper
async def login(session, status):
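    """Post a LOGIN message to the KGS JSON API unless the previous request already succeeded (status == 200)."""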
if status == 200:
return
# need to put your account name/password
message = {
"type": "LOGIN",
"name": kgs_bot,
"password": "",
"locale": "en_US",
}
formatted_message = json.dumps(message)
await session.post(kgs_url, data=formatted_message)
async def logout(session):
    await session.post(kgs_url, data=json.dumps({"type": "LOGOUT"}))
@bold
def formatted_username(user):
"""
user: KGS player dict
yes_status: boolean to dicate whether to contain status of user
Given a KGS player dict user, return a string containing user information
"""
rank = ""
if "g" in user["flags"]:
rank = "Guest"
elif "rank" in user:
rank = user["rank"]
else:
rank = "-"
return user['name'] + " [" + rank + "]"
def formatted_user_status(user):
if "flags" not in user:
return ""
for k, v in config.kgs_user_flags_status.items():
if k in user["flags"]:
return v
def formatted_name(player):
"""
Given a KGS player dict, return a str: username(rank)
https://www.gokgs.com/json/dataTypes.html#user
"""
text = "**" + player['name'] + "**"
if 'rank' in player:
text += " (" + player['rank'] + ")"
    return text
def result(score):
"""
Returns a string indicating the result of a game
https://www.gokgs.com/json/dataTypes.html#score
"""
if type(score) == float:
if score > 0:
out = "Black + " + str(score)
else:
out = "White + " + str(-score)
else:
out = score
return out
async def get_kgs_players(m, session, bot, channel):
if m["type"] == "ROOM_JOIN" and m["channelId"] == ROOM and "users" in m:
text = ""
for u in m["users"]:
text += "\n" + formatted_username(u)+ ": "
text += formatted_user_status(u)
await send_discord_message(bot, channel, text)
await send_kgs_messages(session, text)
async def handle_message(session, bot, channel, data, op):
if data is None or not "messages" in data:
#print("not in message")
return
if "messages" in data:
for m in data["messages"]:
if "channelId" in m and "type" in m:
if op == 1:
await get_kgs_players(m, session, bot, channel)
#text = ""
#for u in m["users"]:
# text += "\n" + formatted_username(u)+ ": "
# text += formatted_user_status(u)
#await send_discord_message(bot, channel, text)
# await send_kgs_messages(session, text)
if op == 0 and m['type'] == 'CHAT' and m['channelId'] == ROOM:
if m['user']['name'] != kgs_bot:
text = formatted_username(m["user"]) + ": "
#if (m["text"].startswith("/") and m["text"] in config.clyde_command)
if m["text"] in config.clyde_command:
text += config.clyde_command[m["text"]]
await send_kgs_messages(session,
[config.clyde_command[m["text"]]])
elif m["text"] in config.addon_command:
text += config.addon_command[m["text"]]
await send_kgs_messages(session,
[config.addon_command[m["text"]]])
else:
text += m["text"]
# await send_discord_message(bot, channel, m["text"])
#text = formatted_username(m["user"]) + ": " + m["text"]
#print("text is " , text)
await send_discord_message(bot, channel, text)
if m['type'] == 'LOGOUT':
await login(session, 0)
async def get_messages(session, bot, channel, op):
async with session.get(kgs_url) as r:
data = await r.json()
await handle_message(session, bot, channel, await r.json(), op)
#print(data)
#print(json.dumps(data))
# if "messages" in data:
# for m in data["messages"]:
# if m["type"] == "CHAT" and m["channelId"] == ROOM:
# text = m["user"]["name"] + " [" + m["user"]["rank"] + "]: " + m["text"]
# print(text)
# await send_discord_message(text, bot)
async def send_discord_message(bot, channel, message):
await bot.send_message(channel, message)
async def send_kgs_messages(session, messages):
for text in messages:
message = {
"type": "CHAT",
"text": text,
"channelId": ROOM
}
formatted_message = json.dumps(message)
await session.post(kgs_url, data=formatted_message)
|
# -*- coding: utf-8 -*-
"""
Zurich Instruments LabOne Python API Example
Demonstrate how to connect to a Zurich Instruments UHF Lock-in Amplifier and
obtain output from the Input PWA and Boxcar using ziDAQServer's poll() command.
"""
# Copyright 2016 Zurich Instruments AG
from __future__ import print_function
import time
import numpy as np
import zhinst.utils
def run_example(device_id, do_plot=False):
"""
Run the example: Connect to a Zurich Instruments UHF Lock-in Amplifier and
obtain Input PWA and Boxcar data via ziDAQServer's synchronous poll command.
Requirements:
UHFLI with UHF-BOX Boxcar Averager Option.
Hardware configuration: Connect signal output 1 to signal input 1 with a
BNC cable.
Arguments:
device_id (str): The ID of the device to run the example with. For
example, `dev2006` or `uhf-dev2006`.
do_plot (bool, optional): Specify whether to plot the boxcar and inputpwa
output. Default is no plot output.
Returns:
sample (dict): A dictionary containing the boxcar sample.
Raises:
Exception: If the BOX Option is not installed.
RuntimeError: If the device is not "discoverable" from the API.
See the "LabOne Programing Manual" for further help, available:
- On Windows via the Start-Menu:
Programs -> Zurich Instruments -> Documentation
- On Linux in the LabOne .tar.gz archive in the "Documentation"
sub-folder.
"""
apilevel_example = 6 # The API level supported by this example.
err_msg = "This example can only be ran on UHF Instruments with the BOX option enabled."
# Call a zhinst utility function that returns:
# - an API session `daq` in order to communicate with devices via the data server.
# - the device ID string that specifies the device branch in the server's node hierarchy.
# - the device's discovery properties.
(daq, device, props) = zhinst.utils.create_api_session(device_id, apilevel_example, required_devtype='UHF',
required_options=['BOX'], required_err_msg=err_msg)
zhinst.utils.api_server_version_check(daq)
# Create a base configuration: Disable all available outputs, awgs, demods, scopes,...
zhinst.utils.disable_everything(daq, device)
# Now configure the instrument for this experiment. The following channels
# and indices work on all device configurations. The values below may be
# changed if the instrument has multiple input/output channels and/or either
# the Multifrequency or Multidemodulator options installed.
out_channel = 0
out_mixer_channel = zhinst.utils.default_output_mixer_channel(props)
in_channel = 0
osc_index = 0
frequency = 400e3
boxcar_index = 0
inputpwa_index = 0
amplitude = 0.5
frequency = 9.11e6
windowstart = 75 # boxcar windowstart [degrees]
windowsize = 3e-9 # boxcar windowsize [seconds]
periods_vals = np.logspace(0, 9, 10, base=2)
exp_setting = [['/%s/sigins/%d/imp50' % (device, in_channel), 1],
['/%s/sigins/%d/ac' % (device, in_channel), 0],
['/%s/sigins/%d/range' % (device, in_channel), 2*amplitude],
['/%s/inputpwas/%d/oscselect' % (device, inputpwa_index), osc_index],
['/%s/inputpwas/%d/inputselect' % (device, inputpwa_index), in_channel],
['/%s/inputpwas/%d/mode' % (device, inputpwa_index), 1],
['/%s/inputpwas/%d/shift' % (device, inputpwa_index), 0.0],
['/%s/inputpwas/%d/harmonic' % (device, inputpwa_index), 1],
['/%s/inputpwas/%d/enable' % (device, inputpwa_index), 1],
['/%s/boxcars/%d/oscselect' % (device, boxcar_index), osc_index],
['/%s/boxcars/%d/inputselect' % (device, boxcar_index), in_channel],
['/%s/boxcars/%d/windowstart' % (device, boxcar_index), windowstart],
['/%s/boxcars/%d/windowsize' % (device, boxcar_index), windowsize],
['/%s/boxcars/%d/limitrate' % (device, boxcar_index), 1e3],
['/%s/boxcars/%d/periods' % (device, boxcar_index), periods_vals[0]],
['/%s/boxcars/%d/enable' % (device, boxcar_index), 1],
['/%s/oscs/%d/freq' % (device, osc_index), frequency],
['/%s/sigouts/%d/on' % (device, out_channel), 1],
['/%s/sigouts/%d/enables/%d' % (device, out_channel, out_mixer_channel), 1],
['/%s/sigouts/%d/range' % (device, out_channel), 1],
['/%s/sigouts/%d/amplitudes/%d' % (device, out_channel, out_mixer_channel), amplitude]]
daq.set(exp_setting)
# Wait for boxcar output to settle
time.sleep(periods_vals[0]/frequency)
# Perform a global synchronisation between the device and the data server:
# Ensure that the settings have taken effect on the device before issuing
# the poll().
daq.sync()
# Get the values that were actually set on the device
frequency_set = daq.getDouble('/%s/oscs/%d/freq' % (device, osc_index))
windowstart_set = daq.getDouble('/%s/boxcars/%d/windowstart' % (device, boxcar_index))
windowsize_set = daq.getDouble('/%s/boxcars/%d/windowsize' % (device, boxcar_index))
# Subscribe to the nodes we would like to record data from
boxcar_sample_path = '/%s/boxcars/%d/sample' % (device, boxcar_index)
boxcar_periods_path = '/%s/boxcars/%d/periods' % (device, boxcar_index)
inputpwa_wave_path = '/%s/inputpwas/%d/wave' % (device, inputpwa_index)
daq.subscribe([boxcar_sample_path, boxcar_periods_path, inputpwa_wave_path])
# We use getAsEvent() to ensure we obtain the first ``periods`` value; if
# its value didn't change, the server won't report the first value.
daq.getAsEvent(boxcar_periods_path)
for periods in periods_vals:
time.sleep(0.5)
daq.setInt(boxcar_periods_path, int(periods))
# Poll the data
poll_length = 0.1 # [s]
poll_timeout = 500 # [ms]
poll_flags = 0
poll_return_flat_dict = True
data = daq.poll(poll_length, poll_timeout, poll_flags, poll_return_flat_dict)
# Unsubscribe from all paths
daq.unsubscribe('*')
# Check the dictionary returned by poll contains the subscribed data. The
# data returned is a dictionary with keys corresponding to the recorded
# data's path in the node hierarchy
assert data, "poll returned an empty data dictionary, did you subscribe to any paths?"
assert boxcar_sample_path in data, "data dictionary has no key '%s'" % boxcar_sample_path
assert boxcar_periods_path in data, "data dictionary has no key '%s'" % boxcar_periods_path
assert inputpwa_wave_path in data, "data dictionary has no key '%s'" % inputpwa_wave_path
sample = data[boxcar_sample_path]
# When using API Level 4 (or higher) poll() returns both the 'value' and
# 'timestamp' of the node. These are two vectors of the same length;
# which consist of (timestamp, value) pairs.
boxcar_value = sample['value']
boxcar_timestamp = sample['timestamp']
boxcar_periods_value = data[boxcar_periods_path]['value']
boxcar_periods_timestamp = data[boxcar_periods_path]['timestamp']
print("Measured average boxcar amplitude is {:.5e} V.".format(np.mean(boxcar_value)))
if do_plot:
# get the sample rate of the device's ADCs
clockbase = float(daq.getInt('/%s/clockbase' % device))
# convert timestamps from ticks to seconds via clockbase
boxcar_t = (boxcar_timestamp - boxcar_timestamp[0])/clockbase
boxcar_periods_t = (boxcar_periods_timestamp - boxcar_periods_timestamp[0])/clockbase
boxcar_periods_t[0] = boxcar_t[0]
# Create plot
import matplotlib.pyplot as plt
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax2 = ax1.twinx()
plt.grid(True)
ax1.plot(boxcar_t, boxcar_value, label='boxcar output')
plt.xlabel('Time (s)')
ax2.step(np.append(boxcar_periods_t, boxcar_t[-1]),
np.append(boxcar_periods_value, boxcar_periods_value[-1]), '-r', label='Averaging periods')
ax2.set_yscale("log")
plt.xlim(min(boxcar_t[0], boxcar_periods_t[0]), max(boxcar_t[-1], boxcar_periods_t[-1]))
ax2.legend(loc=1)
plt.title('Boxcar output: The effect of averaging\nperiods on the boxcar value.')
ax1.legend(loc=4)
ax1.set_ylabel('Boxcar value (V)')
ax2.set_ylabel('Number of Averaging Periods')
plt.draw()
plt.show()
plt.figure()
plt.grid(True)
pwa_wave = data[inputpwa_wave_path][-1]
pwa_wave['binphase'] = pwa_wave['binphase']*360/(2*np.pi)
plt.axhline(0, color='k')
# The inputpwa waveform is stored in 'x', currently 'y' is unused.
plt.plot(pwa_wave['binphase'], pwa_wave['x'])
windowsize_set_degrees = 360*frequency_set*windowsize_set
phase_window = (pwa_wave['binphase'] >= windowstart_set) & \
(pwa_wave['binphase'] <= windowstart_set + windowsize_set_degrees)
plt.fill_between(pwa_wave['binphase'], 0, pwa_wave['x'], where=phase_window, alpha=0.5)
plt.xlim(0, 360)
title = 'Input PWA waveform, the shaded region shows the portion\n of the waveform the boxcar is integrating.'
plt.title(title)
plt.xlabel('Phase (degrees)')
plt.ylabel('Amplitude (V)')
plt.draw()
plt.show()
return sample
|
from abc import abstractmethod, ABC
from torch import nn
class Recomputable(ABC):
@abstractmethod
def recompute(self):
pass
def recompute(module: nn.Module):
if isinstance(module, Recomputable):
module.recompute()
|
from sklearn.feature_extraction.text import TfidfVectorizer
class TermClassifier:
    def _tfidf(self, data=None):
        """
        TF-IDF term weighting for a list of documents.
        """
        vector = TfidfVectorizer()
        transformed = vector.fit_transform(data or [])
        return transformed.toarray()
|
nums = [3,41,12,9,74,15]
print(len(nums))
print(max(nums))
print(min(nums))
print(sum(nums))
newsum = sum(nums)/len(nums)
print("{:.4f}".format(newsum))
|
import os
import gc
from psana.psexp.tools import mode
world_size = 1
if mode == 'mpi':
from mpi4py import MPI
world_size = MPI.COMM_WORLD.Get_size()
rank = MPI.COMM_WORLD.Get_rank()
# set a unique jobid (rank 0 process id) for prometheus client
if rank == 0:
prometheus_jobid = os.getpid()
else:
prometheus_jobid = None
prometheus_jobid = MPI.COMM_WORLD.bcast(prometheus_jobid, root=0)
os.environ['PS_PROMETHEUS_JOBID'] = str(prometheus_jobid)
else:
os.environ['PS_PROMETHEUS_JOBID'] = str(os.getpid())
class InvalidDataSource(Exception): pass
from psana.psexp.serial_ds import SerialDataSource
from psana.psexp.singlefile_ds import SingleFileDataSource
from psana.psexp.shmem_ds import ShmemDataSource
from psana.psexp.legion_ds import LegionDataSource
from psana.psexp.null_ds import NullDataSource
def DataSource(*args, **kwargs):
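    """Factory returning the appropriate DataSource implementation based on the keyword used (shmem, exp, or files) and the parallel mode."""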
# force garbage collection to clean up old DataSources, in
# particular to cause destructors to run to close old shmem msg queues
gc.collect()
args = tuple(map(str, args)) # Hack: workaround for unicode and str being different types in Python 2
# ==== shared memory ====
if 'shmem' in kwargs:
if world_size > 1:
PS_SRV_NODES = int(os.environ.get('PS_SRV_NODES', '0'))
if PS_SRV_NODES == world_size:
raise RuntimeError('All allocated cores are smalldata servers '
'(%d of %d)!' % (PS_SRV_NODES, world_size))
world_group = MPI.COMM_WORLD.Get_group()
server_group = world_group.Incl(range(PS_SRV_NODES))
client_group = world_group.Excl(range(PS_SRV_NODES))
smalldata_kwargs = {'server_group': server_group,
'client_group': client_group}
kwargs['smalldata_kwargs'] = smalldata_kwargs
# create NullDataSource for ranks 0...PS_SRV_NODES-1
# all others are normal ShmemDataSources
if rank < PS_SRV_NODES:
return NullDataSource(*args, **kwargs)
else:
return ShmemDataSource(*args, **kwargs)
else: # world_size == 1
return ShmemDataSource(*args, **kwargs)
# ==== from experiment directory ====
elif 'exp' in kwargs: # experiment string - assumed multiple files
if mode == 'mpi':
if world_size == 1:
return SerialDataSource(*args, **kwargs)
else:
# >> these lines are here to AVOID initializing node.comms
# that class instance sets up the MPI environment, which
# is global... and can interfere with other uses of MPI
# (particularly shmem). Therefore we wish to isolate it
# as much as possible.
from psana.psexp.node import Communicators
from psana.psexp.mpi_ds import MPIDataSource
comms = Communicators()
smalldata_kwargs = {'server_group' : comms.srv_group(),
'client_group' : comms.bd_group()}
kwargs['smalldata_kwargs'] = smalldata_kwargs
if comms._nodetype in ['smd0', 'eb', 'bd']:
return MPIDataSource(comms, *args, **kwargs)
else:
return NullDataSource(*args, **kwargs)
elif mode == 'legion':
return LegionDataSource(*args, **kwargs)
elif mode == 'none':
return SerialDataSource(*args, **kwargs)
else:
raise InvalidDataSource("Incorrect mode. DataSource mode only supports either mpi, legion, or none (non parallel mode).")
# ==== from XTC file(s) ====
elif 'files' in kwargs: # an xtc file
return SingleFileDataSource(*args, **kwargs)
else:
raise InvalidDataSource("Expected keyword(s) not found. DataSource requires exp, shmem, or files keywords.")
|
import pytest
import tensorflow as tf
from tensorflow import keras
import sys
sys.path.append(".")
from keras_cv_attention_models import attention_layers
# Not included: batchnorm_with_activation, conv2d_no_bias, drop_block, hard_swish, phish, mish, layer_norm
def test_add_pre_post_process_tf():
input_shape = (224, 224, 3)
fake_input = tf.random.uniform([1, *input_shape]) * 255
mm = keras.models.Sequential()
rescale_mode = "tf"
attention_layers.add_pre_post_process(mm, rescale_mode=rescale_mode, input_shape=input_shape)
aa = mm.preprocess_input(fake_input)
bb = keras.applications.imagenet_utils.preprocess_input(fake_input, mode=rescale_mode)
tf.assert_less(tf.abs(aa - bb), 1e-7)
aa = mm.preprocess_input(fake_input[0])
bb = keras.applications.imagenet_utils.preprocess_input(fake_input, mode=rescale_mode)
tf.assert_less(tf.abs(aa - bb), 1e-7)
def test_add_pre_post_process_torch():
input_shape = (224, 224, 3)
fake_input = tf.random.uniform([1, *input_shape]) * 255
mm = keras.models.Sequential()
rescale_mode = "torch"
attention_layers.add_pre_post_process(mm, rescale_mode=rescale_mode, input_shape=input_shape)
aa = mm.preprocess_input(fake_input)
bb = keras.applications.imagenet_utils.preprocess_input(fake_input, mode=rescale_mode)
tf.assert_less(tf.abs(aa - bb), 1e-7)
aa = mm.preprocess_input(fake_input[0])
bb = keras.applications.imagenet_utils.preprocess_input(fake_input, mode=rescale_mode)
tf.assert_less(tf.abs(aa - bb), 1e-7)
def test_add_pre_post_process_raw():
input_shape = (224, 224, 3)
fake_input = tf.random.uniform([1, *input_shape]) * 255
mm = keras.models.Sequential()
rescale_mode = "raw"
attention_layers.add_pre_post_process(mm, rescale_mode=rescale_mode, input_shape=input_shape)
aa = mm.preprocess_input(fake_input)
tf.assert_less(tf.abs(aa - fake_input), 1e-7)
def test_anti_alias_downsample():
input_shape = [2, 28, 28, 192]
strides = 2
out = attention_layers.anti_alias_downsample(tf.ones(input_shape), kernel_size=3, strides=strides)
assert out.shape == [input_shape[0], input_shape[1] // strides, input_shape[2] // strides, input_shape[3]]
def test_BiasLayer():
aa = attention_layers.BiasLayer()
input_shape = [2, 14, 14, 192]
assert aa(tf.ones(input_shape)).shape == input_shape
def test_ChannelAffine():
aa = attention_layers.ChannelAffine()
input_shape = [2, 14, 14, 192]
assert aa(tf.ones(input_shape)).shape == input_shape
def test_ClassToken():
aa = attention_layers.ClassToken()
input_shape = [2, 14 * 14, 192]
assert aa(tf.ones(input_shape)).shape == [input_shape[0], input_shape[1] + 1, input_shape[2]]
def test_ConvPositionalEncoding():
aa = attention_layers.ConvPositionalEncoding()
input_shape = [1, 1 + 14 * 14, 256]
assert aa(tf.ones(input_shape)).shape == input_shape
def test_ConvRelativePositionalEncoding():
aa = attention_layers.ConvRelativePositionalEncoding()
input_shape = [1, 8, 1 + 14 * 14, 6]
assert aa(tf.ones(input_shape), tf.ones(input_shape)).shape == input_shape
def test_cot_attention():
input_shape = [2, 28, 28, 192]
assert attention_layers.cot_attention(tf.ones(input_shape), kernel_size=3).shape == input_shape
def test_eca_module():
input_shape = [2, 28, 28, 192]
out = attention_layers.eca_module(tf.ones(input_shape))
assert out.shape == input_shape
def test_fold_by_conv2d_transpose():
inputs = tf.random.uniform([1, 64, 27, 192])
pad_inputs = tf.pad(inputs, [[0, 0], [1, 1], [1, 1], [0, 0]])
pathes = attention_layers.CompatibleExtractPatches(sizes=3, strides=2, rates=1, padding="VALID")(pad_inputs)
reverse = attention_layers.fold_by_conv2d_transpose(pathes, inputs.shape[1:], kernel_size=3, strides=2, padding="SAME")
assert reverse.shape == inputs.shape
def test_halo_attention():
input_shape = [2, 12, 16, 256]
out_shape = 384
out = attention_layers.halo_attention(tf.ones(input_shape), num_heads=4, out_shape=out_shape)
assert out.shape == [input_shape[0], input_shape[1], input_shape[2], out_shape]
def test_mhsa_with_multi_head_position_and_strides():
input_shape = [2, 28 * 28, 192]
strides = 2
output_dim = 384
out = attention_layers.mhsa_with_multi_head_position_and_strides(tf.ones(input_shape), output_dim=output_dim, num_heads=4, key_dim=16, strides=strides)
assert out.shape == [input_shape[0], input_shape[1] // strides // strides, output_dim]
def test_mhsa_with_relative_position_embedding():
input_shape = [2, 14, 16, 256]
out_shape = 384
out = attention_layers.mhsa_with_relative_position_embedding(tf.ones(input_shape), num_heads=4, out_shape=out_shape)
assert out.shape == [input_shape[0], input_shape[1], input_shape[2], out_shape]
def test_mhsa_with_multi_head_relative_position_embedding():
input_shape = [2, 14, 16, 256]
out_shape = 384
out = attention_layers.mhsa_with_multi_head_relative_position_embedding(tf.ones(input_shape), num_heads=4, out_shape=out_shape)
assert out.shape == [input_shape[0], input_shape[1], input_shape[2], out_shape]
def test_mixer_block():
input_shape = [2, 28 * 28, 192]
out = attention_layers.mixer_block(tf.ones(input_shape), tokens_mlp_dim=14 * 14, channels_mlp_dim=9 * 4)
assert out.shape == input_shape
def test_mlp_block():
input_shape = [2, 28 * 28, 192]
out = attention_layers.mlp_block(tf.ones(input_shape), hidden_dim=9 * 4)
assert out.shape == input_shape
def test_MultiHeadPositionalEmbedding():
aa = attention_layers.MultiHeadPositionalEmbedding()
input_shape = [2, 8, 16, 49]
assert aa(tf.ones(input_shape)).shape == input_shape
def test_MultiHeadRelativePositionalEmbedding():
aa = attention_layers.MultiHeadRelativePositionalEmbedding()
input_shape = [2, 8, 29 * 29 + 1, 29 * 29 + 1]
assert aa(tf.ones(input_shape)).shape == input_shape
def test_outlook_attention():
input_shape = [2, 28, 28, 192]
out = attention_layers.outlook_attention(tf.ones(input_shape), embed_dim=192, num_heads=4)
assert out.shape == input_shape
def test_outlook_attention_simple():
input_shape = [2, 28, 28, 192]
out = attention_layers.outlook_attention_simple(tf.ones(input_shape), embed_dim=192, num_heads=4)
assert out.shape == input_shape
def test_PositionalEmbedding():
aa = attention_layers.PositionalEmbedding()
input_shape = [2, 8, 16, 49]
assert aa(tf.ones(input_shape)).shape == input_shape
def test_RelativePositionalEmbedding():
aa = attention_layers.RelativePositionalEmbedding()
hh = pos_hh = 14
ww = pos_ww = 16
input_shape = [2, 4, hh, ww, 32]
assert aa(tf.ones(input_shape)).shape == [input_shape[0], input_shape[1], hh, ww, pos_hh, pos_ww]
def test_rsoftmax():
input_shape = [2, 1, 1, 49 * 2]
out = attention_layers.rsoftmax(tf.ones(input_shape), groups=2)
assert out.shape == input_shape
def test_ScaledStandardizedConv2D():
filters = 64
aa = attention_layers.ScaledStandardizedConv2D(filters=filters, kernel_size=3, padding="SAME")
input_shape = [2, 28, 28, 32]
assert aa(tf.ones(input_shape)).shape == [*input_shape[:3], filters]
def test_se_module():
input_shape = [2, 28, 28, 192]
out = attention_layers.se_module(tf.ones(input_shape), se_ratio=0.25)
assert out.shape == input_shape
def test_spatial_gating_block():
input_shape = [2, 28 * 28, 192]
out = attention_layers.spatial_gating_block(tf.ones(input_shape))
assert out.shape == [*input_shape[:2], input_shape[-1] // 2]
def test_split_attention_conv2d():
input_shape = [2, 28, 28, 192]
filters = 384
out = attention_layers.split_attention_conv2d(tf.ones(input_shape), filters=filters)
assert out.shape == [*input_shape[:3], filters]
def test_CompatibleExtractPatches():
inputs = tf.random.uniform([1, 64, 27, 192])
pad_inputs = tf.pad(inputs, [[0, 0], [1, 1], [1, 1], [0, 0]])
pathes = tf.image.extract_patches(pad_inputs, [1, 3, 3, 1], [1, 2, 2, 1], [1, 1, 1, 1], padding="VALID")
patches_by_conv2d = attention_layers.CompatibleExtractPatches(sizes=3, strides=2, rates=1, padding="SAME")(inputs)
tf.assert_less(tf.abs(pathes - patches_by_conv2d), 1e-7)
patches_1 = attention_layers.CompatibleExtractPatches(sizes=3, strides=2, rates=1, padding="SAME", compressed=False)(inputs)
patches_tpu_1 = attention_layers.CompatibleExtractPatches(3, 2, 1, padding="SAME", compressed=False, force_conv=True)(inputs)
tf.assert_less(tf.abs(patches_1 - patches_tpu_1), 1e-7)
patches_2 = attention_layers.CompatibleExtractPatches(sizes=3, strides=2, rates=1, padding="SAME", compressed=True)(inputs)
patches_tpu_2 = attention_layers.CompatibleExtractPatches(3, 2, 1, padding="SAME", compressed=True, force_conv=True)(inputs)
tf.assert_less(tf.abs(patches_2 - patches_tpu_2), 1e-7)
def test_ZeroInitGain():
aa = attention_layers.ZeroInitGain()
input_shape = [2, 28, 28, 32]
assert aa(tf.ones(input_shape)).shape == input_shape
|
import unittest
import torch
import math
from model_utils_torch.ops import *
from model_utils_torch.more_ops.multiple_pad import test_center_multiple_pad
class TestOps(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.test_data_2d = torch.randn(5, 10, 16, 16)
def test_channel_shuffle(self):
a = torch.arange(4).reshape(1, 4, 1, 1)
y = channel_shuffle(a, 2).reshape(-1)
self.assertEqual((0, 2, 1, 3), tuple(y))
def test_resize_ref(self):
a = torch.randn(5, 10, 32, 32)
y = resize_ref(self.test_data_2d, a)
self.assertEqual((5, 10, 32, 32), tuple(y.shape))
def test_add_coord(self):
y = add_coord(self.test_data_2d)
self.assertEqual((5, 12, 16, 16), tuple(y.shape))
def test_pixelwise_norm(self):
y = pixelwise_norm(self.test_data_2d)
self.assertEqual((5, 10, 16, 16), tuple(y.shape))
def test_flatten(self):
y = flatten(self.test_data_2d)
self.assertEqual((5, 2560), tuple(y.shape))
def test_adaptive_instance_normalization(self):
style = torch.randn(5, 10, 1, 1)
y = adaptive_instance_normalization(self.test_data_2d, style)
self.assertEqual((5, 10, 16, 16), tuple(y.shape))
def test_minibatch_stddev(self):
y = minibatch_stddev(self.test_data_2d, 5, 1)
self.assertEqual((5, 11, 16, 16), tuple(y.shape))
def test_pixelshuffle(self):
a = torch.randn(5, 16, 32, 32)
y = pixelshuffle(a, (2, 2))
y2 = F.pixel_shuffle(a, 2)
b = torch.equal(y, y2)
self.assertTrue(b)
def test_pixelshuffle_invert(self):
a = torch.randn(5, 16, 32, 32)
x = F.pixel_shuffle(a, 2)
y = pixelshuffle_invert(x, (2, 2))
b = torch.equal(a, y)
self.assertTrue(b)
def test_one_hot(self):
# dim == -1
a = torch.randint(0, 100, [100, 100, 10])
arr = one_hot(a, 100, dtype=torch.long)
restruct_a = one_hot_invert(arr)
b = torch.all(a == restruct_a)
self.assertTrue(b)
# dim != -1
a = torch.randint(0, 100, [100, 100, 10])
arr = one_hot(a, 100, dim=1, dtype=torch.long)
restruct_a = one_hot_invert(arr, dim=1)
b = torch.all(a == restruct_a)
self.assertTrue(b)
test_one_hot_invert = test_one_hot
def test_multiple_pad(self):
b = test_center_multiple_pad()
self.assertTrue(b)
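# Hedged sketch (not part of the original module): allow running this test
# module directly with the standard-library unittest runner.
if __name__ == '__main__':
    unittest.main()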
|
import datetime
from django.utils.timezone import utc
from django.shortcuts import render, get_object_or_404, redirect
from django.urls import reverse
from django.contrib.auth.decorators import login_required
from .forms import OptionForm, TitleForm
from .models import Emails, Options, Titles, Voted
from django.contrib import messages
import time
from django.core.mail import send_mail
from django.conf import settings
# def SendResults(title):
# if title.emailed == '0' and title.published == '1':
# title.emailed = 1
# title.save()
# voted = Voted.objects.filter(title = title)
# if len(voted) != 0:
# for email in voted:
# send_mail(
# 'Results for '+str(title),
# 'The results for '+str(title)+'\nHave been declared at :'+str(settings.MY_SITE_NAME)+'\nCheck it out \nHear '+str(settings.MY_SITE_NAME)+str(reverse('pols', args=[title.id])),
# from_email = settings.EMAIL_HOST_USER,
# recipient_list=[email.email],
# fail_silently=False,
# )
def get_client_ip(request):
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
def home(request):
titles = Titles.objects.filter(published ='1').order_by('-id')
data = {'title':'Home', 'titles':titles}
return render(request, 'VotingApp/home.html', data)
def TitleEdit(request, id):
    if request.method == "POST":
        title = get_object_or_404(Titles, id=id)
        if title.published == '0':
            title.title = request.POST['title']
            try:
                days = int(request.POST['end_in'])
                title.published = 1
                date = utc.localize(datetime.datetime.today()) + datetime.timedelta(days=days)
                title.end = date
                title.save()
            except (KeyError, ValueError):
                title.save()
    return redirect(reverse('titles', args=[id]))
def VoteView(request, id):
data = {}
d=0
title = get_object_or_404(Titles, id=id)
now = utc.localize(datetime.datetime.utcnow())
options = Options.objects.filter(title=title)
data = {"options": options, "title": title}
if title.published == '0':
data.update({'error':'Preview Mode'})
if title.user == request.user:
data.update({'this':'true'})
return render(request, 'VotingApp/index.html', data)
title.end = title.end+datetime.timedelta(hours=6) - datetime.timedelta(minutes=30)
if title.end <= now:
# SendResults(title)
        data.update({'error': 'Polls have ended; the results are'})
return render(request, 'VotingApp/index.html', data)
# now = now.replace(tzinfo=utc).strftime('%D: %H:%M:%S')
time_left = (title.end-now-datetime.timedelta(hours=5)-datetime.timedelta(minutes=30))
data.update({"time_left" : str(time_left)[:-7]})
if request.method == "POST" and d == 0:
user_email = get_client_ip(request)
if Emails.objects.filter(email=user_email).exists():
newemail = Emails.objects.filter(email=user_email)[0]
else:
newemail = Emails.objects.create()
newemail.email = user_email
newemail.save()
emailss = Voted.objects.filter(email=newemail)
if emailss.filter(title=title).exists():
data.update({'error' : "You have already placed a vote !!"})
return render(request, 'VotingApp/index.html', data)
instance = Options.objects.get(name=request.POST['name'])
instance.vote +=1
instance.save()
Voted.objects.create(email=newemail, title=title)
data.update({'message' : "Suckessfully Voted for "+ request.POST['name']})
return render(request, 'VotingApp/index.html', data)
@login_required
def PanelView(request):
r_message = messages.get_messages(request)
data = {"title":"DashBoard", 'r_message':r_message}
if request.method == "POST":
name = request.POST['name']
# days = request.POST['end_in']
# days = int(days)
time = datetime.datetime.today() + datetime.timedelta(days=1000)
Titles.objects.create(user=request.user, title=name, end=time)
titles = Titles.objects.filter(user=request.user)
titles = titles.order_by('-id')
data.update({'titles':titles})
return render(request, 'VotingApp/panel.html', data)
@login_required
def TitlesView(request, id):
r_message = messages.get_messages(request)
title = get_object_or_404(Titles, id=id)
data = {'title':title}
if title.user == request.user:
options = Options.objects.filter(title=title).order_by('-id')
data.update({'object':title, 'options':options})
if title.published == '0':
if request.method == "POST":
form = OptionForm(request.POST, request.FILES)
form.title = title
if form.is_valid():
form.save()
else:
print(form)
print("not saved")
# print(str(request.PUT))
# if request.method == "POST":
# pass
# print(str(request.POST))
else:
data.update({"time_left" : str(title.end)[:-7]})
return render(request, 'VotingApp/title_detail.html', data)
else:
return redirect(reverse('pols', args=[id]))
@login_required
def DeleteTitle(request, id):
title = get_object_or_404(Titles, id=id)
if title.user == request.user:
title.delete()
return redirect('dashboard')
def tester(request, id):
form = OptionForm()
return render(request,'tester/t.html', {'form':form})
@login_required
def EditOption(request, id):
option = get_object_or_404(Options, id=id)
if option.title.user == request.user:
if option.title.published == '0':
print(option.id)
if request.method == "POST":
form = OptionForm(request.POST, request.FILES, instance=option)
form.title = option.title
if form.is_valid():
form.save()
else:
print('!!!!!!!!!!!!!!!!')
print(form)
print('!!!!!!!!!!!!!!!!')
messages.success(request, "Change Failed")
return redirect(reverse('titles', args=[option.title.id]))
@login_required
def DeleteOption(request, id):
option = get_object_or_404(Options, id=id)
if option.title.user == request.user:
option.delete()
return redirect(reverse('titles', args=[option.title.id]))
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
from __future__ import unicode_literals
from ..helpers import HEADERS
from base_scraper import BaseScraper
import lxml.html
import requests
import time
from unicodedata import normalize
class IMDB(BaseScraper):
BASE_URL = "http://www.imdb.com"
SEARCH_PATH = "/find?q="
URL_END = "&s=all"
def __init__(self):
pass
def construct_search_url(self, title):
""" Construct search results url for specified title """
safe_title = normalize("NFC", title).replace(" ", "+").replace("&", "%26").replace("?", "%3F").lower()
return "{}{}{}{}".format(IMDB.BASE_URL, IMDB.SEARCH_PATH, safe_title, IMDB.URL_END)
def get_title(self, xml_doc):
try:
return xml_doc.xpath('//div[@class="title_block"]//h1/text()')[0].strip()
except IndexError:
return ''
def get_alternative_title(self, xml_doc):
try:
return xml_doc.xpath('//div[@class="title_wrapper"]//div[@class="originalTitle"]/text()')
except IndexError:
return ''
def get_description(self, xml_doc):
try:
return xml_doc.xpath('//div[@class="summary_text"]')[0].text_content().strip().replace('See full summary', '').replace('»', '').strip()
except IndexError:
return ''
def get_director(self, xml_doc):
try:
return map(lambda x: x.text, xml_doc.xpath('//div[@class="plot_summary_wrapper"]//div[@class="credit_summary_item"][1]//a'))
except IndexError:
return ''
def get_rating(self, xml_doc):
try:
return xml_doc.xpath('//div[@class="ratingValue"]/strong/span/text()')[0].strip()
except IndexError:
return ''
def get_genres(self, xml_doc):
try:
return xml_doc.xpath('//div[@class="titleBar"]//div[@class="title_wrapper"]/div[@class="subtext"]/a/text()')[0:-1]
except IndexError:
return ''
def get_votes(self, xml_doc):
try:
return xml_doc.xpath('//div[@class="imdbRating"]/a/span/text()')[0].strip()
except IndexError:
return ''
def get_running_time(self, xml_doc):
try:
return xml_doc.xpath('//div[@class="titleBar"]//div[@class="title_wrapper"]//time/text()')[0].strip()
except IndexError:
return ''
def get_content_rating(self, xml_doc):
try:
return xml_doc.xpath('//div[@class="titleBar"]/div[2]/div[@class="subtext"]/text()')[0].strip()
except IndexError:
return ''
def get_stars(self, xml_doc):
try:
res = map(lambda x: x.text, xml_doc.xpath('//div[@class="plot_summary_wrapper"]//div[@class="credit_summary_item"][3]//a'))[0:-1]
if not res:
return map(lambda x: x.text, xml_doc.xpath('//div[@class="plot_summary_wrapper"]//div[@class="credit_summary_item"][2]//a'))[0:-1]
except IndexError:
try:
return xml_doc.xpath('//div[@class="plot_summary_wrapper"]/div[1]/div[3]/span/a/span/text()')
except IndexError:
return ''
def get_languages(self, xml_doc):
try:
if xml_doc.xpath('//*[@id="titleDetails"]/div[3]/h4/text()') == ['Language:']:
return xml_doc.xpath('//*[@id="titleDetails"]/div[3]/a/text()')
else:
return xml_doc.xpath('//*[@id="titleDetails"]/div[2]/a/text()')
except IndexError:
return ''
def get_image_url(self, xml_doc):
try:
return xml_doc.xpath('//*[@class="poster"]/a/img/@src')[0]
except IndexError:
return ''
# Movie specific functions
def get_movie_year(self, xml_doc):
try:
return xml_doc.xpath('//*[@class="title_wrapper"]//span[@id="titleYear"]/a/text()')[0]
except IndexError:
return ''
def get_awards(self, xml_doc):
try:
awards = xml_doc.xpath('//*[@id="titleAwardsRanks"]/span[@class="awards-blurb"]/b/text()')[0].strip()
if not "oscar" in awards.lower():
return ''
else:
if awards[-1] == ".":
return " ".join(xml_doc.xpath('//div[@id="titleAwardsRanks"]//span[@class="awards-blurb"]/b/text()')[0].strip().split())
try:
return IMDB.BASE_URL + xml_doc.xpath('//*[@id="titleAwardsRanks"]/span[@class="see-more inline"]/a/@href')[0]
except IndexError:
return ''
except IndexError:
return ''
# Series Specific Functions
def get_series_year(self, xml_doc):
try:
return xml_doc.xpath('//div[@class="title_wrapper"]//div[@class="subtext"]/a/text()')[-1].strip().split('(')[-1].split(')')[0].strip()
except IndexError:
return ''
def get_creator(self, xml_doc):
try:
return map(lambda x: x.text, xml_doc.xpath('//div[@class="plot_summary_wrapper"]//div[@class="credit_summary_item"][1]//a'))
except IndexError:
return ''
def get_series_stars(self, xml_doc):
try:
cast = map(lambda x: x.text, xml_doc.xpath('//div[@class="plot_summary_wrapper"]//div[@class="credit_summary_item"][2]//a'))[0:-1]
if cast:
return cast
else:
# Some series do not list a creator, so cast is the first element in the credit summary pane at the top of the page
# The better solution is to search for the <h4> matching the term "Cast" and then take the next 3 elements
return xml_doc.xpath('//div[@class="plot_summary_wrapper"]//div[@class="credit_summary_item"][1]//a/text()')[0:3]
except IndexError:
return ''
# Full Response Payloads
def get_search_page(self, asset):
"""
Get search page listing all matching titles (from which the url of the title will be extracted)
"""
search_url = self.construct_search_url(asset)
return lxml.html.document_fromstring(requests.get(search_url, headers=HEADERS).content)
def get_movie_page_url(self, title):
"""
return URL associated with movie page by parsing search page DOM
return None if no results are found
"""
invalid_results = ["(TV Episode)", "(TV Series)", "(TV Mini-Series)", "(Short)", "(Video Game)"]
search_page = self.get_search_page(title)
try:
for index, section in enumerate(search_page.xpath('//*[@id="main"]/div[1]/div')):
if len(section.xpath('h3/text()')) > 0:
# Find the Div associated with Titles (rather than Characters, etc)
if section.xpath('h3/text()')[0] == "Titles":
# Select first in list which doesn't contain invalid_results
for index, list_title in enumerate(search_page.xpath('//*[@id="main"]/div[1]/div[2]/table[1]/tr')):
if not any(x in list_title.text_content() for x in invalid_results):
endpoint = search_page.xpath('//*[@id="main"]/div[1]/div[2]/table[1]/tr[%i]/td/a' %(index+1))[0].attrib['href']
return IMDB.BASE_URL + endpoint
except IndexError:
return
def get_series_page_url(self, title):
"""
return URL associated with series page by parsing search page DOM
return None if no results are found
"""
valid_results = ["(TV Series)", "(TV Mini-Series)"]
search_page = self.get_search_page(title)
try:
for index, section in enumerate(search_page.xpath('//*[@id="main"]/div[1]/div')):
if len(section.xpath('h3/text()')) > 0:
# Find the Div associated with Titles (rather than Characters, etc)
if section.xpath('h3/text()')[0] == "Titles":
# Select first in list which doesn't contain invalid_results
for index, list_title in enumerate(search_page.xpath('//*[@id="main"]/div[1]/div[2]/table[1]/tr')):
if any(x in list_title.text_content() for x in valid_results):
# Some items listed as "TV Episode" also contain a link with the term "TV Series" below
if "(TV Episode)" not in list_title.text_content():
endpoint = search_page.xpath('//*[@id="main"]/div[1]/div[2]/table[1]/tr[%i]/td/a' %(index+1))[0].attrib['href']
return IMDB.BASE_URL + endpoint
except IndexError:
return None
def get_movie_details(self, movie, movie_url):
""" Scrape movie page for attributes specified below """
if movie_url != None:
movie_page = lxml.html.document_fromstring(requests.get(movie_url, headers=HEADERS).content)
return {
'url': movie_url,
'info_retrieved': time.strftime("%Y-%m-%d"),
'title': self.get_title(movie_page),
'alternative_title': self.get_alternative_title(movie_page),
'year': self.get_movie_year(movie_page),
'description': self.get_description(movie_page),
'director': self.get_director(movie_page),
'stars': self.get_stars(movie_page),
'genre': self.get_genres(movie_page),
'rating': self.get_rating(movie_page),
'votes': self.get_votes(movie_page),
'running_time': self.get_running_time(movie_page),
'languages': self.get_languages(movie_page),
'content_rating': self.get_content_rating(movie_page),
'awards': self.get_awards(movie_page),
'image_url': self.get_image_url(movie_page),
}
def get_series_details(self, series, series_url):
""" Scrape series page for attributes specified below """
if series_url != None:
series_page = lxml.html.document_fromstring(requests.get(series_url, headers=HEADERS).content)
return {
'url': series_url,
'info_retrieved': time.strftime("%Y-%m-%d"),
'title': self.get_title(series_page),
'year': self.get_series_year(series_page),
'description': self.get_description(series_page),
'creator': self.get_creator(series_page),
'stars': self.get_series_stars(series_page),
'genre': self.get_genres(series_page),
'rating': self.get_rating(series_page),
'votes': self.get_votes(series_page),
'running_time': self.get_running_time(series_page),
'languages': self.get_languages(series_page),
'content_rating': self.get_content_rating(series_page),
'image_url': self.get_image_url(series_page),
}
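# Hedged usage sketch (illustrative only; the title below is an assumption and
# the calls perform live HTTP requests against imdb.com):
#   scraper = IMDB()
#   movie_url = scraper.get_movie_page_url("Inception")
#   details = scraper.get_movie_details("Inception", movie_url)
#   print(details.get("title"), details.get("rating"))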
|
#
# Convert the per-plant bounding-box CSV files into a single text annotation
# file: one line per image, "<image path> <box values...>".
#
import csv
import os
def nameG(a, b):  # build the list of bbox CSV file names to process
    c = []
    for i in range(1, b):
        c.append(a + "tobacco_plant" + str(i).zfill(3) + "_bbox.csv")
    return c
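# Illustrative example (assumed prefix): nameG("/data/", 4) yields
# ["/data/tobacco_plant001_bbox.csv", "/data/tobacco_plant002_bbox.csv",
#  "/data/tobacco_plant003_bbox.csv"]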
dataPath = '/home/csse/DATASETS/DataSets/mtcnn-head-detection-master'
#dataPath = "/home/caleb/Downloads/DataSets/plantDB/Phenotyping_Leaf_detection_dataset/Plant_Phenotyping_Datasets/Plant_Phenotyping_Datasets/Plant/Tobacco/"
outPath= "/home/caleb/Downloads/Datasets/plantDB/Phenotyping_Leaf_detection_dataset/"
#csv_file = raw_input(dataPath +"ara2012_plant001_bbox.csv")
txt_file = "/home/caleb/Downloads/DataSets/plantDB/Phenotyping_Leaf_detection_dataset/bbox_all_tobacco.txt"
fileNames = nameG(dataPath,121)
print(fileNames)
with open(txt_file, "w") as txtOut:
for i in fileNames:
with open(i,"r") as csvIn:
txtOut.write(i[0:len(i)-8]+"rgb.png")
txtOut.write(" ")
[txtOut.write(" ".join(row)+"") for row in csv.reader(csvIn)]
txtOut.write("\n")
txtOut.close()
|
from django.shortcuts import render
def admin_home(request):
return render(request, 'admin_template/admin_home.html')
|
from __future__ import annotations
from ..expressions import Expression
class ExpressionVisitor:
def __init__(self):
self.visited = {}
self._top_level = True
def visit(self, expression):
is_top_level = False
if self._top_level:
is_top_level = True
self._top_level = False
if expression not in self.visited:
method_name = "visit_%s" % expression.__class__.__name__
visitor = getattr(self, method_name, self.generic_visit)
self.visited[expression] = visitor(expression)
if is_top_level:
self._top_level = True
return self.visited[expression]
def generic_visit(self, expression):
for value in expression.__dict__.values():
if isinstance(value, Expression):
self.visit(value)
elif isinstance(value, (list, tuple, set)):
for sub_value in value:
if isinstance(sub_value, Expression):
self.visit(sub_value)
return expression
__all__ = ["ExpressionVisitor"]
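# Hedged usage sketch (not part of the package): subclasses typically add
# `visit_<ClassName>` methods, which `visit` dispatches to by class name and
# memoizes per expression instance. The subclass below merely counts how many
# distinct expressions are reached.
class _CountingVisitor(ExpressionVisitor):
    def __init__(self):
        super().__init__()
        self.count = 0
    def generic_visit(self, expression):
        self.count += 1
        return super().generic_visit(expression)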
|
import numpy as np
def merge_colinear(x, y):
"""Processes boundary coordinates in polyline with vertices X, Y to remove
redundant colinear points. Polyline is not assumed to be open or closed.
Parameters
----------
x : array_like
One dimensional array of horizontal boundary coordinates.
y : array_like
One dimensional array of vertical boundary coordinates.
Returns
-------
xout : array_like
X with colinear boundary points removed.
yout : array_like
Y with colinear boundary points removed.
"""
# compute boundary differences
dX = np.diff(x)
dY = np.diff(y)
    # detect and delete stationary repeats (np.delete returns a new array,
    # so the results must be reassigned)
    Repeats = np.argwhere((dX == 0) & (dY == 0))
    x = np.delete(x, Repeats)
    y = np.delete(y, Repeats)
    dX = np.delete(dX, Repeats)
    dY = np.delete(dY, Repeats)
# calculate slope transitions
slope = dY / dX
# find transitions
dslope = np.diff(slope)
dslope[np.isnan(dslope)] = 0
transitions = np.argwhere(dslope != 0)
# construct merged sequences
xout = np.append(x[0], x[transitions + 1])
yout = np.append(y[0], y[transitions + 1])
xout = np.append(xout, x[-1])
yout = np.append(yout, y[-1])
return xout, yout
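if __name__ == "__main__":
    # Minimal usage sketch (assumed example data, not from the original module):
    # the interior point (1, 1) lies on the segment from (0, 0) to (2, 2) and is removed.
    xs = np.array([0.0, 1.0, 2.0, 2.0])
    ys = np.array([0.0, 1.0, 2.0, 3.0])
    xout, yout = merge_colinear(xs, ys)
    print(xout, yout)  # expected: [0. 2. 2.] [0. 2. 3.]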
|
from typing import Tuple
import ema_workbench
import LESO
import json
from tinydb import TinyDB
import pandas as pd
from LESO import AttrDict
def load_ema_leso_results(
run_id: int, exp_prefix: str,
results_folder: str, return_db_as_df=True,
exclude_solver_errors = True,
) -> Tuple[pd.DataFrame, dict, pd.DataFrame]:
"""Small helper function to load results easily from the document structure"""
ema_results = f"{exp_prefix}_ema_results_{run_id}.tar.gz"
experiments, outcomes = ema_workbench.load_results(results_folder / ema_results)
db_file = f"{exp_prefix}_db{run_id}.json"
db = TinyDB(results_folder / db_file)
if return_db_as_df:
db = convert_db_to_df(db)
if exclude_solver_errors:
db = db[db.solver_status == "ok"]
return experiments, outcomes, db
def convert_db_to_df(db: TinyDB):
"""Note that this is not stable for non-structured (e.g. document-type) dbs"""
df = pd.DataFrame(
{key: [document.get(key, None) for document in db] for key in db.all()[0]}
)
return df
def annualized_cost(component: LESO.Component, system: LESO.System) -> float:
"""Calculate the annualized cost of a LESO component"""
LESO.finance.set_finance_variables(component, system)
return component.capex * component.crf + component.opex
def quick_lcoe(component: LESO.Component, system: LESO.System) -> float:
"""Calculates the LCOE based on annualized cost and yearly production"""
return annualized_cost(component, system) / component.state.power.sum()
def open_leso_experiment_file(filepath: str) -> AttrDict:
"""Open a LESO experiment file (json write-out) and return an AttrDict"""
with open(filepath, "r") as infile:
di = AttrDict(json.load(infile))
return di
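# Hedged usage sketch (the run id, prefix and folder below are placeholders,
# not values from the original module):
#   from pathlib import Path
#   experiments, outcomes, db = load_ema_leso_results(
#       run_id=1, exp_prefix="experiment", results_folder=Path("./results")
#   )
#   print(db.head())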
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ScheduleResource']
class ScheduleResource(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
daily_recurrence: Optional[pulumi.Input[pulumi.InputType['DayDetailsArgs']]] = None,
hourly_recurrence: Optional[pulumi.Input[pulumi.InputType['HourDetailsArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
lab_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
provisioning_state: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'EnableStatus']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
task_type: Optional[pulumi.Input[Union[str, 'TaskType']]] = None,
time_zone_id: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
weekly_recurrence: Optional[pulumi.Input[pulumi.InputType['WeekDetailsArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A schedule.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['DayDetailsArgs']] daily_recurrence: The daily recurrence of the schedule.
:param pulumi.Input[pulumi.InputType['HourDetailsArgs']] hourly_recurrence: The hourly recurrence of the schedule.
:param pulumi.Input[str] id: The identifier of the resource.
:param pulumi.Input[str] lab_name: The name of the lab.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the resource.
:param pulumi.Input[str] provisioning_state: The provisioning status of the resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'EnableStatus']] status: The status of the schedule.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[Union[str, 'TaskType']] task_type: The task type of the schedule.
:param pulumi.Input[str] time_zone_id: The time zone id.
:param pulumi.Input[str] type: The type of the resource.
:param pulumi.Input[pulumi.InputType['WeekDetailsArgs']] weekly_recurrence: The weekly recurrence of the schedule.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['daily_recurrence'] = daily_recurrence
__props__['hourly_recurrence'] = hourly_recurrence
__props__['id'] = id
if lab_name is None and not opts.urn:
raise TypeError("Missing required property 'lab_name'")
__props__['lab_name'] = lab_name
__props__['location'] = location
__props__['name'] = name
__props__['provisioning_state'] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['status'] = status
__props__['tags'] = tags
__props__['task_type'] = task_type
__props__['time_zone_id'] = time_zone_id
__props__['type'] = type
__props__['weekly_recurrence'] = weekly_recurrence
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab/v20150521preview:ScheduleResource"), pulumi.Alias(type_="azure-native:devtestlab:ScheduleResource"), pulumi.Alias(type_="azure-nextgen:devtestlab:ScheduleResource"), pulumi.Alias(type_="azure-native:devtestlab/latest:ScheduleResource"), pulumi.Alias(type_="azure-nextgen:devtestlab/latest:ScheduleResource"), pulumi.Alias(type_="azure-native:devtestlab/v20160515:ScheduleResource"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:ScheduleResource"), pulumi.Alias(type_="azure-native:devtestlab/v20180915:ScheduleResource"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20180915:ScheduleResource")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ScheduleResource, __self__).__init__(
'azure-native:devtestlab/v20150521preview:ScheduleResource',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ScheduleResource':
"""
Get an existing ScheduleResource resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["daily_recurrence"] = None
__props__["hourly_recurrence"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["status"] = None
__props__["tags"] = None
__props__["task_type"] = None
__props__["time_zone_id"] = None
__props__["type"] = None
__props__["weekly_recurrence"] = None
return ScheduleResource(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dailyRecurrence")
def daily_recurrence(self) -> pulumi.Output[Optional['outputs.DayDetailsResponse']]:
"""
The daily recurrence of the schedule.
"""
return pulumi.get(self, "daily_recurrence")
@property
@pulumi.getter(name="hourlyRecurrence")
def hourly_recurrence(self) -> pulumi.Output[Optional['outputs.HourDetailsResponse']]:
"""
The hourly recurrence of the schedule.
"""
return pulumi.get(self, "hourly_recurrence")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[Optional[str]]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
The status of the schedule.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Output[Optional[str]]:
"""
The task type of the schedule.
"""
return pulumi.get(self, "task_type")
@property
@pulumi.getter(name="timeZoneId")
def time_zone_id(self) -> pulumi.Output[Optional[str]]:
"""
The time zone id.
"""
return pulumi.get(self, "time_zone_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="weeklyRecurrence")
def weekly_recurrence(self) -> pulumi.Output[Optional['outputs.WeekDetailsResponse']]:
"""
The weekly recurrence of the schedule.
"""
return pulumi.get(self, "weekly_recurrence")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Flatten
# NOTE: the original snippet uses an `embedding_layer` defined elsewhere; the
# definition below is an assumed placeholder so the snippet runs on its own.
embedding_layer = Embedding(input_dim=10, output_dim=4)
model3 = Sequential([
    embedding_layer,
    Flatten(),
])
labels_to_encode = np.array([[3]])
print(model3.predict(labels_to_encode))
|
"""Constant values for the modules"""
# system constants
APP_NAME = "SelfStabilizingReconfiguration"
RUN_SLEEP = 1
INTEGRATION_RUN_SLEEP = 0.05
FD_SLEEP = 0.25
FD_TIMEOUT = 5
MAX_QUEUE_SIZE = 10 # Max allowed amount of messages in send queue
# FD
BEAT_THRESHOLD = 30 # Threshold for liveness, beat-variable
CNT_THRESHOLD = 20 # Threshold for progress, cnt-variable
NOT_PARTICIPANT = 'NOT_PARTICIPANT'
BOTTOM = 'BOTTOM'
DFLT_NTF = (0, BOTTOM)
RECSA_MSG_TYPE = 2
RECMA_MSG_TYPE = 3
# ABD
READ_REQUEST = "READ_REQUEST"
READ_CONFIRM = "READ_CONFIRM"
WRITE = "WRITE"
BOT = -1
NOT_SENT = "NOT_SENT"
NOT_ACKED = "NOT_ACKED"
ACKED = "ACKED"
WRITE_ACK = "WRITE_ACK"
READ_CONFIRM_ACK = "READ_CONFIRM_ACK"
READ_REQUEST_ACK = "READ_REQUEST_ACK"
|
from fastapi import APIRouter
from .api.endpoints import api_router
from .health.endpoints import health_router
base_router = APIRouter()
base_router.include_router(api_router, prefix='/api')
base_router.include_router(health_router, prefix='/health')
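def create_app():
    """Illustrative sketch (not part of the original module): mount the
    combined router on an application, exposing /api/... and /health/... routes."""
    from fastapi import FastAPI
    app = FastAPI()
    app.include_router(base_router)
    return app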
|
from SBaaS_base.postgresql_orm_base import *
#IS
#internal_standard
class internal_standard(Base):
#__table__ = make_table('internal_standard')
__tablename__ = 'internal_standard'
is_id = Column(Integer, nullable = False);
is_date = Column(DateTime, nullable = False);
experimentor_id = Column(String(50), nullable = False);
extraction_method_id = Column(String(50))
__table_args__ = (PrimaryKeyConstraint('is_id'),
ForeignKeyConstraint(['is_id'],['internal_standard_storage.is_id']),
)
def __init__(self,data_dict_I):
self.experimentor_id=data_dict_I['experimentor_id'];
self.is_id=data_dict_I['is_id'];
self.is_date=data_dict_I['is_date'];
self.extraction_method_id=data_dict_I['extraction_method_id'];
#internal_standard_storage
class internal_standard_storage(Base):
#__table__ = make_table('internal_standard_storage')
__tablename__ = 'internal_standard_storage'
is_id = Column(Integer, nullable = False);
concentration = Column(Float);
concentration_units = Column(String(10));
aliquots = Column(Integer);
aliquot_volume = Column(Float);
aliquot_volume_units = Column(String(10));
solvent = Column(String(100));
ph = Column(Float);
box = Column(Integer);
posstart = Column(Integer);
posend = Column(Integer)
__table_args__ = (PrimaryKeyConstraint('is_id'),
)
def __init__(self,data_dict_I):
self.ph=data_dict_I['ph'];
self.is_id=data_dict_I['is_id'];
self.concentration=data_dict_I['concentration'];
self.concentration_units=data_dict_I['concentration_units'];
self.aliquots=data_dict_I['aliquots'];
self.aliquot_volume=data_dict_I['aliquot_volume'];
self.posstart=data_dict_I['posstart'];
self.aliquot_volume_units=data_dict_I['aliquot_volume_units'];
self.posend=data_dict_I['posend'];
self.solvent=data_dict_I['solvent'];
self.box=data_dict_I['box'];
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle
import numpy as np
from PIL import Image
from paddleseg.cvlibs import manager
from paddleseg.transforms import Compose
@manager.DATASETS.add_component
class Dataset(paddle.io.Dataset):
"""
Pass in a custom dataset that conforms to the format.
Args:
transforms (list): Transforms for image.
dataset_root (str): The dataset directory.
num_classes (int): Number of classes.
        mode (str): Which part of the dataset to use. It is one of ('train', 'val', 'test'). Default: 'train'.
train_path (str): The train dataset file. When mode is 'train', train_path is necessary.
The contents of train_path file are as follow:
image1.jpg ground_truth1.png
image2.jpg ground_truth2.png
        val_path (str): The evaluation dataset file. When mode is 'val', val_path is necessary.
            The contents are the same as those of train_path.
        test_path (str): The test dataset file. When mode is 'test', test_path is necessary.
            The annotation file is not required in the test_path file.
separator (str): The separator of dataset list. Default: ' '.
Examples:
import paddleseg.transforms as T
from paddleseg.datasets import Dataset
transforms = [T.RandomPaddingCrop(crop_size=(512,512)), T.Normalize()]
dataset_root = 'dataset_root_path'
train_path = 'train_path'
num_classes = 2
dataset = Dataset(transforms = transforms,
dataset_root = dataset_root,
num_classes = 2,
train_path = train_path,
mode = 'train')
"""
def __init__(self,
transforms,
dataset_root,
num_classes,
mode='train',
train_path=None,
val_path=None,
test_path=None,
separator=' ',
ignore_index=255):
self.dataset_root = dataset_root
self.transforms = Compose(transforms)
self.file_list = list()
mode = mode.lower()
self.mode = mode
self.num_classes = num_classes
self.ignore_index = ignore_index
if mode.lower() not in ['train', 'val', 'test']:
raise ValueError(
"mode should be 'train', 'val' or 'test', but got {}.".format(
mode))
if self.transforms is None:
raise ValueError("`transforms` is necessary, but it is None.")
self.dataset_root = dataset_root
if not os.path.exists(self.dataset_root):
raise FileNotFoundError('there is not `dataset_root`: {}.'.format(
self.dataset_root))
if mode == 'train':
if train_path is None:
raise ValueError(
'When `mode` is "train", `train_path` is necessary, but it is None.'
)
elif not os.path.exists(train_path):
raise FileNotFoundError(
'`train_path` is not found: {}'.format(train_path))
else:
file_path = train_path
elif mode == 'val':
if val_path is None:
raise ValueError(
'When `mode` is "val", `val_path` is necessary, but it is None.'
)
elif not os.path.exists(val_path):
raise FileNotFoundError(
'`val_path` is not found: {}'.format(val_path))
else:
file_path = val_path
else:
if test_path is None:
raise ValueError(
'When `mode` is "test", `test_path` is necessary, but it is None.'
)
elif not os.path.exists(test_path):
raise FileNotFoundError(
'`test_path` is not found: {}'.format(test_path))
else:
file_path = test_path
with open(file_path, 'r') as f:
for line in f:
items = line.strip().split(separator)
if len(items) != 2:
if mode == 'train' or mode == 'val':
raise ValueError(
"File list format incorrect! In training or evaluation task it should be"
" image_name{}label_name\\n".format(separator))
image_path = os.path.join(self.dataset_root, items[0])
label_path = None
else:
image_path = os.path.join(self.dataset_root, items[0])
label_path = os.path.join(self.dataset_root, items[1])
self.file_list.append([image_path, label_path])
def __getitem__(self, idx):
image_path, label_path = self.file_list[idx]
if self.mode == 'test':
im, im_info, _ = self.transforms(im=image_path)
im = im[np.newaxis, ...]
return im, im_info, image_path
elif self.mode == 'val':
im, im_info, _ = self.transforms(im=image_path)
im = im[np.newaxis, ...]
label = np.asarray(Image.open(label_path))
label = label[np.newaxis, np.newaxis, :, :]
return im, im_info, label
else:
im, im_info, label = self.transforms(
im=image_path, label=label_path)
return im, label
def __len__(self):
return len(self.file_list)
|
# coding: utf-8
"""
Flat API
    The Flat API allows you to easily extend the abilities of the [Flat Platform](https://flat.io), with a wide range of use cases including the following: * Creating and importing new music scores using MusicXML, MIDI, Guitar Pro (GP3, GP4, GP5, GPX, GP), PowerTab, TuxGuitar and MuseScore files * Browsing, updating, copying, exporting the user's scores (for example in MP3, WAV or MIDI) * Managing educational resources with Flat for Education: creating & updating the organization accounts, the classes, rosters and assignments. The Flat API is built on HTTP. Our API is RESTful. It has predictable resource URLs. It returns HTTP response codes to indicate errors. It also accepts and returns JSON in the HTTP body. The [schema](/swagger.yaml) of this API follows the [OpenAPI Initiative (OAI) specification](https://www.openapis.org/), you can use and work with [compatible Swagger tools](http://swagger.io/open-source-integrations/). This API features Cross-Origin Resource Sharing (CORS) implemented in compliance with [W3C spec](https://www.w3.org/TR/cors/). You can use your favorite HTTP/REST library for your programming language to use Flat's API. This specification and reference is [available on Github](https://github.com/FlatIO/api-reference). Getting Started and learn more: * [API Overview and introduction](https://flat.io/developers/docs/api/) * [Authentication (Personal Access Tokens or OAuth2)](https://flat.io/developers/docs/api/authentication.html) * [SDKs](https://flat.io/developers/docs/api/sdks.html) * [Rate Limits](https://flat.io/developers/docs/api/rate-limits.html) * [Changelog](https://flat.io/developers/docs/api/changelog.html) # noqa: E501
OpenAPI spec version: 2.7.0
Contact: developers@flat.io
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class OrganizationInvitation(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'organization': 'str',
'organization_role': 'OrganizationRoles',
'custom_code': 'str',
'email': 'str',
'invited_by': 'str',
'used_by': 'str'
}
attribute_map = {
'id': 'id',
'organization': 'organization',
'organization_role': 'organizationRole',
'custom_code': 'customCode',
'email': 'email',
'invited_by': 'invitedBy',
'used_by': 'usedBy'
}
def __init__(self, id=None, organization=None, organization_role=None, custom_code=None, email=None, invited_by=None, used_by=None): # noqa: E501
"""OrganizationInvitation - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._organization = None
self._organization_role = None
self._custom_code = None
self._email = None
self._invited_by = None
self._used_by = None
self.discriminator = None
if id is not None:
self.id = id
if organization is not None:
self.organization = organization
if organization_role is not None:
self.organization_role = organization_role
if custom_code is not None:
self.custom_code = custom_code
if email is not None:
self.email = email
if invited_by is not None:
self.invited_by = invited_by
if used_by is not None:
self.used_by = used_by
@property
def id(self):
"""Gets the id of this OrganizationInvitation. # noqa: E501
The invitation unique identifier # noqa: E501
:return: The id of this OrganizationInvitation. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this OrganizationInvitation.
The invitation unique identifier # noqa: E501
:param id: The id of this OrganizationInvitation. # noqa: E501
:type: str
"""
self._id = id
@property
def organization(self):
"""Gets the organization of this OrganizationInvitation. # noqa: E501
The unique identifier of the Organization owning this class # noqa: E501
:return: The organization of this OrganizationInvitation. # noqa: E501
:rtype: str
"""
return self._organization
@organization.setter
def organization(self, organization):
"""Sets the organization of this OrganizationInvitation.
The unique identifier of the Organization owning this class # noqa: E501
:param organization: The organization of this OrganizationInvitation. # noqa: E501
:type: str
"""
self._organization = organization
@property
def organization_role(self):
"""Gets the organization_role of this OrganizationInvitation. # noqa: E501
:return: The organization_role of this OrganizationInvitation. # noqa: E501
:rtype: OrganizationRoles
"""
return self._organization_role
@organization_role.setter
def organization_role(self, organization_role):
"""Sets the organization_role of this OrganizationInvitation.
:param organization_role: The organization_role of this OrganizationInvitation. # noqa: E501
:type: OrganizationRoles
"""
self._organization_role = organization_role
@property
def custom_code(self):
"""Gets the custom_code of this OrganizationInvitation. # noqa: E501
Enrollment code to use when joining this organization # noqa: E501
:return: The custom_code of this OrganizationInvitation. # noqa: E501
:rtype: str
"""
return self._custom_code
@custom_code.setter
def custom_code(self, custom_code):
"""Sets the custom_code of this OrganizationInvitation.
Enrollment code to use when joining this organization # noqa: E501
:param custom_code: The custom_code of this OrganizationInvitation. # noqa: E501
:type: str
"""
self._custom_code = custom_code
@property
def email(self):
"""Gets the email of this OrganizationInvitation. # noqa: E501
The email address this invitation was sent to # noqa: E501
:return: The email of this OrganizationInvitation. # noqa: E501
:rtype: str
"""
return self._email
@email.setter
def email(self, email):
"""Sets the email of this OrganizationInvitation.
The email address this invitation was sent to # noqa: E501
:param email: The email of this OrganizationInvitation. # noqa: E501
:type: str
"""
self._email = email
@property
def invited_by(self):
"""Gets the invited_by of this OrganizationInvitation. # noqa: E501
The unique identifier of the User who created this invitation # noqa: E501
:return: The invited_by of this OrganizationInvitation. # noqa: E501
:rtype: str
"""
return self._invited_by
@invited_by.setter
def invited_by(self, invited_by):
"""Sets the invited_by of this OrganizationInvitation.
The unique identifier of the User who created this invitation # noqa: E501
:param invited_by: The invited_by of this OrganizationInvitation. # noqa: E501
:type: str
"""
self._invited_by = invited_by
@property
def used_by(self):
"""Gets the used_by of this OrganizationInvitation. # noqa: E501
The unique identifier of the User who used this invitation # noqa: E501
:return: The used_by of this OrganizationInvitation. # noqa: E501
:rtype: str
"""
return self._used_by
@used_by.setter
def used_by(self, used_by):
"""Sets the used_by of this OrganizationInvitation.
The unique identifier of the User who used this invitation # noqa: E501
:param used_by: The used_by of this OrganizationInvitation. # noqa: E501
:type: str
"""
self._used_by = used_by
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, OrganizationInvitation):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-13 18:34
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('slug', models.SlugField(help_text='Unique value for product page URL, created from name.', unique=True)),
('description', models.TextField()),
('is_active', models.BooleanField(default=True)),
('meta_keywords', models.CharField(help_text='Comma-delimited set of SEO keywords for meta tag', max_length=255, verbose_name='Meta keywords')),
('meta_description', models.CharField(help_text='Content of description meta tag', max_length=255, verbose_name='Meta Description')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['name'],
'db_table': 'categories',
'verbose_name_plural': 'Categories',
},
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, unique=True)),
('slug', models.SlugField(help_text='Unique value for product page URL, created from name.', max_length=255, unique=True)),
('brand', models.CharField(max_length=50)),
('sku', models.CharField(max_length=50)),
('price', models.DecimalField(decimal_places=2, max_digits=15)),
('old_price', models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=15)),
('image', models.CharField(max_length=50)),
('is_active', models.BooleanField(default=True)),
('is_bestseller', models.BooleanField(default=False)),
('is_featured', models.BooleanField(default=False)),
('quantity', models.IntegerField()),
('description', models.TextField()),
('meta_keywords', models.CharField(help_text='Comma-delimited set of SEO keywords for meta tag', max_length=255, verbose_name='Meta keywords')),
('meta_description', models.CharField(help_text='Content of description meta tag', max_length=255, verbose_name='Meta Description')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('categories', models.ManyToManyField(to='catalog.Category')),
],
options={
'ordering': ['-created_at'],
'db_table': 'products',
},
),
]
|
"""Reader for the GMSH file format."""
from __future__ import division, absolute_import
__copyright__ = "Copyright (C) 2009 Xueyu Zhu, Andreas Kloeckner"
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from six.moves import range
from functools import reduce
import numpy as np
#import numpy.linalg as la
from pytools import memoize_method, Record
from meshpy.gmsh import ( # noqa
ScriptSource, LiteralSource, FileSource, ScriptWithFilesSource)
__doc__ = """
.. exception:: GmshFileFormatError
Element types
-------------
.. autoclass:: GmshElementBase
Simplex Elements
^^^^^^^^^^^^^^^^
.. autoclass:: GmshSimplexElementBase
.. autoclass:: GmshPoint
.. autoclass:: GmshIntervalElement
.. autoclass:: GmshTriangularElement
.. autoclass:: GmshIncompleteTriangularElement
.. autoclass:: GmshTetrahedralElement
Tensor Product Elements
^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: GmshTensorProductElementBase
.. autoclass:: GmshQuadrilateralElement
.. autoclass:: GmshHexahedralElement
Receiver interface
------------------
.. autoclass:: GmshMeshReceiverBase
Receiver example implementation
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. autoclass:: GmshMeshReceiverNumPy
Reader
------
.. autoclass:: ScriptSource
.. autoclass:: FileSource
.. autoclass:: ScriptWithFilesSource
.. autofunction:: read_gmsh
.. autofunction:: generate_gmsh
"""
# {{{ tools
def generate_triangle_vertex_tuples(order):
yield (0, 0)
yield (order, 0)
yield (0, order)
def generate_triangle_edge_tuples(order):
for i in range(1, order):
yield (i, 0)
for i in range(1, order):
yield (order-i, i)
for i in range(1, order):
yield (0, order-i)
def generate_triangle_volume_tuples(order):
for i in range(1, order):
for j in range(1, order-i):
yield (j, i)
def generate_quad_vertex_tuples(dim, order):
from pytools import \
generate_nonnegative_integer_tuples_below
for tup in generate_nonnegative_integer_tuples_below(2, dim):
yield tuple(order * i for i in tup)
class LineFeeder:
def __init__(self, line_iterable):
self.line_iterable = iter(line_iterable)
self.next_line = None
def has_next_line(self):
if self.next_line is not None:
return True
try:
self.next_line = next(self.line_iterable)
except StopIteration:
return False
else:
return True
def get_next_line(self):
if self.next_line is not None:
nl = self.next_line
self.next_line = None
return nl.strip()
try:
nl = next(self.line_iterable)
except StopIteration:
raise GmshFileFormatError("unexpected end of file")
else:
return nl.strip()
# }}}
# {{{ element info
class GmshElementBase(object):
"""
.. automethod:: vertex_count
.. automethod:: node_count
.. automethod:: lexicographic_node_tuples
.. automethod:: get_lexicographic_gmsh_node_indices
.. method:: equidistant_unit_nodes
(Implemented by subclasses)
"""
def __init__(self, order):
self.order = order
# {{{ simplices
class GmshSimplexElementBase(GmshElementBase):
def vertex_count(self):
return self.dimensions + 1
@memoize_method
def node_count(self):
"""Return the number of interpolation nodes in this element."""
d = self.dimensions
o = self.order
from operator import mul
from pytools import factorial
return int(reduce(mul, (o + 1 + i for i in range(d)), 1) / factorial(d))
@memoize_method
def lexicographic_node_tuples(self):
"""Generate tuples enumerating the node indices present
in this element. Each tuple has a length equal to the dimension
        of the element. The tuples' constituents are non-negative integers
whose sum is less than or equal to the order of the element.
"""
from pytools import \
generate_nonnegative_integer_tuples_summing_to_at_most
result = list(
generate_nonnegative_integer_tuples_summing_to_at_most(
self.order, self.dimensions))
assert len(result) == self.node_count()
return result
@memoize_method
def get_lexicographic_gmsh_node_indices(self):
gmsh_tup_to_index = dict(
(tup, i)
for i, tup in enumerate(self.gmsh_node_tuples()))
return np.array([gmsh_tup_to_index[tup]
for tup in self.lexicographic_node_tuples()],
dtype=np.intp)
class GmshPoint(GmshSimplexElementBase):
dimensions = 0
@memoize_method
def gmsh_node_tuples(self):
return [()]
class GmshIntervalElement(GmshSimplexElementBase):
dimensions = 1
@memoize_method
def gmsh_node_tuples(self):
return [(0,), (self.order,), ] + [
(i,) for i in range(1, self.order)]
class GmshIncompleteTriangularElement(GmshSimplexElementBase):
dimensions = 2
def __init__(self, order):
self.order = order
@memoize_method
def gmsh_node_tuples(self):
result = []
for tup in generate_triangle_vertex_tuples(self.order):
result.append(tup)
for tup in generate_triangle_edge_tuples(self.order):
result.append(tup)
return result
class GmshTriangularElement(GmshSimplexElementBase):
dimensions = 2
@memoize_method
def gmsh_node_tuples(self):
return {
1: [
(0, 0), (1, 0), (0, 1),
],
2: [
(0, 0), (2, 0), (0, 2), (1, 0), (1, 1), (0, 1),
],
3: [
(0, 0), (3, 0), (0, 3), (1, 0), (2, 0), (2, 1), (1, 2), (0, 2),
(0, 1), (1, 1),
],
4: [
(0, 0), (4, 0), (0, 4), (1, 0), (2, 0), (3, 0), (3, 1), (2, 2),
(1, 3), (0, 3), (0, 2), (0, 1), (1, 1), (2, 1), (1, 2),
],
5: [
(0, 0), (5, 0), (0, 5), (1, 0), (2, 0), (3, 0), (4, 0), (4, 1),
(3, 2), (2, 3), (1, 4), (0, 4), (0, 3), (0, 2), (0, 1), (1, 1),
(3, 1), (1, 3), (2, 1), (2, 2), (1, 2),
],
}[self.order]
class GmshTetrahedralElement(GmshSimplexElementBase):
dimensions = 3
@memoize_method
def gmsh_node_tuples(self):
# gmsh's node ordering is on crack
return {
1: [(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)],
2: [
(0, 0, 0), (2, 0, 0), (0, 2, 0), (0, 0, 2), (1, 0, 0), (1, 1, 0),
(0, 1, 0), (0, 0, 1), (0, 1, 1), (1, 0, 1)],
3: [
(0, 0, 0), (3, 0, 0), (0, 3, 0), (0, 0, 3), (1, 0, 0), (2, 0, 0),
(2, 1, 0), (1, 2, 0), (0, 2, 0), (0, 1, 0), (0, 0, 2), (0, 0, 1),
(0, 1, 2), (0, 2, 1), (1, 0, 2), (2, 0, 1), (1, 1, 0), (1, 0, 1),
(0, 1, 1), (1, 1, 1)],
4: [
(0, 0, 0), (4, 0, 0), (0, 4, 0), (0, 0, 4), (1, 0, 0), (2, 0, 0),
(3, 0, 0), (3, 1, 0), (2, 2, 0), (1, 3, 0), (0, 3, 0), (0, 2, 0),
(0, 1, 0), (0, 0, 3), (0, 0, 2), (0, 0, 1), (0, 1, 3), (0, 2, 2),
(0, 3, 1), (1, 0, 3), (2, 0, 2), (3, 0, 1), (1, 1, 0), (1, 2, 0),
(2, 1, 0), (1, 0, 1), (2, 0, 1), (1, 0, 2), (0, 1, 1), (0, 1, 2),
(0, 2, 1), (1, 1, 2), (2, 1, 1), (1, 2, 1), (1, 1, 1)],
5: [
(0, 0, 0), (5, 0, 0), (0, 5, 0), (0, 0, 5), (1, 0, 0), (2, 0, 0),
(3, 0, 0), (4, 0, 0), (4, 1, 0), (3, 2, 0), (2, 3, 0), (1, 4, 0),
(0, 4, 0), (0, 3, 0), (0, 2, 0), (0, 1, 0), (0, 0, 4), (0, 0, 3),
(0, 0, 2), (0, 0, 1), (0, 1, 4), (0, 2, 3), (0, 3, 2), (0, 4, 1),
(1, 0, 4), (2, 0, 3), (3, 0, 2), (4, 0, 1), (1, 1, 0), (1, 3, 0),
(3, 1, 0), (1, 2, 0), (2, 2, 0), (2, 1, 0), (1, 0, 1), (3, 0, 1),
(1, 0, 3), (2, 0, 1), (2, 0, 2), (1, 0, 2), (0, 1, 1), (0, 1, 3),
(0, 3, 1), (0, 1, 2), (0, 2, 2), (0, 2, 1), (1, 1, 3), (3, 1, 1),
(1, 3, 1), (2, 1, 2), (2, 2, 1), (1, 2, 2), (1, 1, 1), (2, 1, 1),
(1, 2, 1), (1, 1, 2)],
}[self.order]
# }}}
# {{{ tensor product elements
class GmshTensorProductElementBase(GmshElementBase):
def vertex_count(self):
return 2**self.dimensions
@memoize_method
def node_count(self):
return (self.order+1) ** self.dimensions
@memoize_method
def lexicographic_node_tuples(self):
"""Generate tuples enumerating the node indices present
in this element. Each tuple has a length equal to the dimension
        of the element. The tuples' constituents are non-negative integers,
        each of which is at most the order of the element.
"""
from pytools import \
generate_nonnegative_integer_tuples_below
result = list(
generate_nonnegative_integer_tuples_below(
self.order+1, self.dimensions))
assert len(result) == self.node_count()
return result
@memoize_method
def get_lexicographic_gmsh_node_indices(self):
gmsh_tup_to_index = dict(
(tup, i)
for i, tup in enumerate(self.gmsh_node_tuples()))
return np.array([gmsh_tup_to_index[tup]
for tup in self.lexicographic_node_tuples()],
dtype=np.intp)
class GmshQuadrilateralElement(GmshTensorProductElementBase):
dimensions = 2
@memoize_method
def gmsh_node_tuples(self):
# gmsh's node ordering is still on crack
return {
1: [(0, 0), (0, 1), (1, 1), (1, 0), ],
2: [
# start index 0: vertices
(0, 0), (0, 2), (2, 2), (2, 0),
# start index 4: edges
(1, 0), (2, 1), (1, 2), (0, 1),
# start index 8: volume
(1, 1)],
3: [
# vertices
(0, 0), (0, 3), (3, 3), (3, 0),
# edges
(1, 0), (2, 0),
(3, 1), (3, 2),
(1, 3), (2, 3),
(0, 1), (0, 2),
# volume
(1, 1), (2, 1),
(1, 2), (2, 2),
],
}[self.order]
class GmshHexahedralElement(GmshTensorProductElementBase):
dimensions = 3
@memoize_method
def gmsh_node_tuples(self):
# gmsh's node ordering is always on crack
# obtained by using the files in
# contrib/extract-gmsh-node-order
# with gmsh 2.7.1
return {
1: [
(0, 0, 0), (0, 1, 0), (1, 1, 0), (1, 0, 0),
(0, 0, 1), (0, 1, 1), (1, 1, 1), (1, 0, 1),
],
2: [
(0, 0, 0), (0, 0, 2), (2, 0, 2), (2, 0, 0),
(0, 2, 0), (0, 2, 2), (2, 2, 2), (2, 2, 0),
(0, 0, 1), (1, 0, 0), (0, 1, 0), (1, 0, 2),
(0, 1, 2), (2, 0, 1), (2, 1, 2), (2, 1, 0),
(0, 2, 1), (1, 2, 0), (1, 2, 2), (2, 2, 1),
(1, 0, 1), (0, 1, 1), (1, 1, 0), (1, 1, 2),
(2, 1, 1), (1, 2, 1), (1, 1, 1),
],
3: [
(0, 3, 3), (3, 3, 3), (3, 0, 3), (0, 0, 3),
(0, 3, 0), (3, 3, 0), (3, 0, 0), (0, 0, 0),
(1, 3, 3), (2, 3, 3), (0, 2, 3), (0, 1, 3),
(0, 3, 2), (0, 3, 1), (3, 2, 3), (3, 1, 3),
(3, 3, 2), (3, 3, 1), (2, 0, 3), (1, 0, 3),
(3, 0, 2), (3, 0, 1), (0, 0, 2), (0, 0, 1),
(1, 3, 0), (2, 3, 0), (0, 2, 0), (0, 1, 0),
(3, 2, 0), (3, 1, 0), (2, 0, 0), (1, 0, 0),
(1, 2, 3), (1, 1, 3), (2, 1, 3), (2, 2, 3),
(1, 3, 2), (2, 3, 2), (2, 3, 1), (1, 3, 1),
(0, 2, 2), (0, 2, 1), (0, 1, 1), (0, 1, 2),
(3, 2, 2), (3, 1, 2), (3, 1, 1), (3, 2, 1),
(2, 0, 2), (1, 0, 2), (1, 0, 1), (2, 0, 1),
(1, 2, 0), (2, 2, 0), (2, 1, 0), (1, 1, 0),
(1, 2, 2), (2, 2, 2), (2, 1, 2), (1, 1, 2),
(1, 2, 1), (2, 2, 1), (2, 1, 1), (1, 1, 1),
],
4: [
(4, 0, 0), (4, 4, 0), (0, 4, 0), (0, 0, 0),
(4, 0, 4), (4, 4, 4), (0, 4, 4), (0, 0, 4),
(4, 1, 0), (4, 2, 0), (4, 3, 0), (3, 0, 0),
(2, 0, 0), (1, 0, 0), (4, 0, 1), (4, 0, 2),
(4, 0, 3), (3, 4, 0), (2, 4, 0), (1, 4, 0),
(4, 4, 1), (4, 4, 2), (4, 4, 3), (0, 3, 0),
(0, 2, 0), (0, 1, 0), (0, 4, 1), (0, 4, 2),
(0, 4, 3), (0, 0, 1), (0, 0, 2), (0, 0, 3),
(4, 1, 4), (4, 2, 4), (4, 3, 4), (3, 0, 4),
(2, 0, 4), (1, 0, 4), (3, 4, 4), (2, 4, 4),
(1, 4, 4), (0, 3, 4), (0, 2, 4), (0, 1, 4),
(3, 1, 0), (1, 1, 0), (1, 3, 0), (3, 3, 0),
(2, 1, 0), (1, 2, 0), (2, 3, 0), (3, 2, 0),
(2, 2, 0), (4, 1, 1), (4, 3, 1), (4, 3, 3),
(4, 1, 3), (4, 2, 1), (4, 3, 2), (4, 2, 3),
(4, 1, 2), (4, 2, 2), (3, 0, 1), (3, 0, 3),
(1, 0, 3), (1, 0, 1), (3, 0, 2), (2, 0, 3),
(1, 0, 2), (2, 0, 1), (2, 0, 2), (3, 4, 1),
(1, 4, 1), (1, 4, 3), (3, 4, 3), (2, 4, 1),
(1, 4, 2), (2, 4, 3), (3, 4, 2), (2, 4, 2),
(0, 3, 1), (0, 1, 1), (0, 1, 3), (0, 3, 3),
(0, 2, 1), (0, 1, 2), (0, 2, 3), (0, 3, 2),
(0, 2, 2), (3, 1, 4), (3, 3, 4), (1, 3, 4),
(1, 1, 4), (3, 2, 4), (2, 3, 4), (1, 2, 4),
(2, 1, 4), (2, 2, 4), (3, 1, 1), (3, 3, 1),
(1, 3, 1), (1, 1, 1), (3, 1, 3), (3, 3, 3),
(1, 3, 3), (1, 1, 3), (3, 2, 1), (2, 1, 1),
(3, 1, 2), (2, 3, 1), (3, 3, 2), (1, 2, 1),
(1, 3, 2), (1, 1, 2), (3, 2, 3), (2, 1, 3),
(2, 3, 3), (1, 2, 3), (2, 2, 1), (3, 2, 2),
(2, 1, 2), (2, 3, 2), (1, 2, 2), (2, 2, 3),
(2, 2, 2),
]
}[self.order]
# }}}
# }}}
# {{{ receiver interface
class GmshMeshReceiverBase(object):
"""
.. attribute:: gmsh_element_type_to_info_map
.. automethod:: set_up_nodes
.. automethod:: add_node
.. automethod:: finalize_nodes
.. automethod:: set_up_elements
.. automethod:: add_element
.. automethod:: finalize_elements
.. automethod:: add_tag
.. automethod:: finalize_tags
"""
gmsh_element_type_to_info_map = {
1: GmshIntervalElement(1),
2: GmshTriangularElement(1),
3: GmshQuadrilateralElement(1),
4: GmshTetrahedralElement(1),
5: GmshHexahedralElement(1),
8: GmshIntervalElement(2),
9: GmshTriangularElement(2),
10: GmshQuadrilateralElement(2),
11: GmshTetrahedralElement(2),
12: GmshHexahedralElement(2),
15: GmshPoint(0),
20: GmshIncompleteTriangularElement(3),
21: GmshTriangularElement(3),
22: GmshIncompleteTriangularElement(4),
23: GmshTriangularElement(4),
24: GmshIncompleteTriangularElement(5),
25: GmshTriangularElement(5),
26: GmshIntervalElement(3),
27: GmshIntervalElement(4),
28: GmshIntervalElement(5),
29: GmshTetrahedralElement(3),
30: GmshTetrahedralElement(4),
31: GmshTetrahedralElement(5),
92: GmshHexahedralElement(3),
93: GmshHexahedralElement(4),
}
def set_up_nodes(self, count):
pass
def add_node(self, node_nr, point):
pass
def finalize_nodes(self):
pass
def set_up_elements(self, count):
pass
def add_element(self, element_nr, element_type, vertex_nrs,
lexicographic_nodes, tag_numbers):
pass
def finalize_elements(self):
pass
def add_tag(self, name, index, dimension):
pass
def finalize_tags(self):
pass
# }}}
# {{{ receiver example
class GmshMeshReceiverNumPy(GmshMeshReceiverBase):
"""GmshReceiver that emulates the semantics of
:class:`meshpy.triangle.MeshInfo` and :class:`meshpy.tet.MeshInfo` by using
similar fields, but instead of loading data into ForeignArrays, load into
NumPy arrays. Since this class is not wrapping any libraries in other
languages -- the Gmsh data is obtained via parsing text -- use :mod:`numpy`
arrays as the base array data structure for convenience.
.. versionadded:: 2014.1
"""
def __init__(self):
# Use data fields similar to meshpy.triangle.MeshInfo and
# meshpy.tet.MeshInfo
self.points = None
self.elements = None
self.element_types = None
self.element_markers = None
self.tags = None
# Gmsh has no explicit concept of facets or faces; certain faces are a type
# of element. Consequently, there are no face markers, but elements can be
# grouped together in physical groups that serve as markers.
def set_up_nodes(self, count):
        # Preallocate the node list, using None as a sentinel value.
        # Preallocation is done not for performance, but so that values can be
        # assigned at arbitrary indices as they are read.
self.points = [None] * count
def add_node(self, node_nr, point):
self.points[node_nr] = point
def finalize_nodes(self):
pass
def set_up_elements(self, count):
        # Preallocate arrays so that elements can be assigned in arbitrary order.
self.elements = [None] * count
self.element_types = [None] * count
self.element_markers = [None] * count
self.tags = []
def add_element(self, element_nr, element_type, vertex_nrs,
lexicographic_nodes, tag_numbers):
self.elements[element_nr] = vertex_nrs
self.element_types[element_nr] = element_type
self.element_markers[element_nr] = tag_numbers
# TODO: Add lexicographic node information
def finalize_elements(self):
pass
def add_tag(self, name, index, dimension):
self.tags.append((name, index, dimension))
def finalize_tags(self):
pass
# }}}
# {{{ file reader
class GmshFileFormatError(RuntimeError):
pass
def read_gmsh(receiver, filename, force_dimension=None):
"""Read a gmsh mesh file from *filename* and feed it to *receiver*.
:param receiver: Implements the :class:`GmshMeshReceiverBase` interface.
:param force_dimension: if not None, truncate point coordinates to
this many dimensions.
"""
mesh_file = open(filename, 'rt')
try:
result = parse_gmsh(receiver, mesh_file, force_dimension=force_dimension)
finally:
mesh_file.close()
return result
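# Hedged usage sketch (added for illustration, not part of the original module):
# feed a .msh file to the NumPy-based receiver defined above. "mesh.msh" is a
# placeholder filename.
def _example_read_msh(filename="mesh.msh"):
    receiver = GmshMeshReceiverNumPy()
    read_gmsh(receiver, filename)
    # receiver.points, receiver.elements, receiver.element_types and
    # receiver.tags are now populated as plain Python lists.
    return receiver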
def generate_gmsh(receiver, source, dimensions=None, order=None, other_options=[],
extension="geo", gmsh_executable="gmsh", force_dimension=None,
output_file_name="output.msh"):
"""Run gmsh and feed the output to *receiver*.
:arg source: an instance of :class:`LiteralSource` or :class:`FileSource`
:param receiver: Implements the :class:`GmshMeshReceiverBase` interface.
"""
from meshpy.gmsh import GmshRunner
runner = GmshRunner(source, dimensions, order=order,
other_options=other_options, extension=extension,
gmsh_executable=gmsh_executable,
output_file_name=output_file_name)
runner.__enter__()
try:
result = parse_gmsh(receiver, runner.output_file,
force_dimension=force_dimension)
finally:
runner.__exit__(None, None, None)
return result
def parse_gmsh(receiver, line_iterable, force_dimension=None):
"""
    :arg line_iterable: an iterable of the lines of the gmsh file, for example
        an open file object.
:arg receiver: This object will be fed the entities encountered in reading the
GMSH file. See :class:`GmshMeshReceiverBase` for the interface this
object needs to conform to.
:param force_dimension: if not None, truncate point coordinates to this many
dimensions.
"""
feeder = LineFeeder(line_iterable)
# collect the mesh information
class ElementInfo(Record):
pass
while feeder.has_next_line():
next_line = feeder.get_next_line()
if not next_line.startswith("$"):
raise GmshFileFormatError(
"expected start of section, '%s' found instead" % next_line)
section_name = next_line[1:]
if section_name == "MeshFormat":
line_count = 0
while True:
next_line = feeder.get_next_line()
if next_line == "$End"+section_name:
break
if line_count == 0:
version_number, file_type, data_size = next_line.split()
if line_count > 0:
raise GmshFileFormatError(
"more than one line found in MeshFormat section")
if version_number not in ["2.1", "2.2"]:
from warnings import warn
warn("unexpected mesh version number '%s' found"
% version_number)
if file_type != "0":
raise GmshFileFormatError(
"only ASCII gmsh file type is supported")
line_count += 1
elif section_name == "Nodes":
node_count = int(feeder.get_next_line())
receiver.set_up_nodes(node_count)
node_idx = 1
while True:
next_line = feeder.get_next_line()
if next_line == "$End"+section_name:
break
parts = next_line.split()
if len(parts) != 4:
raise GmshFileFormatError(
"expected four-component line in $Nodes section")
read_node_idx = int(parts[0])
if read_node_idx != node_idx:
raise GmshFileFormatError("out-of-order node index found")
if force_dimension is not None:
point = [float(x) for x in parts[1:force_dimension+1]]
else:
point = [float(x) for x in parts[1:]]
receiver.add_node(
node_idx-1,
np.array(point, dtype=np.float64))
node_idx += 1
if node_count+1 != node_idx:
raise GmshFileFormatError("unexpected number of nodes found")
receiver.finalize_nodes()
elif section_name == "Elements":
element_count = int(feeder.get_next_line())
receiver.set_up_elements(element_count)
element_idx = 1
while True:
next_line = feeder.get_next_line()
if next_line == "$End"+section_name:
break
parts = [int(x) for x in next_line.split()]
if len(parts) < 4:
raise GmshFileFormatError("too few entries in element line")
read_element_idx = parts[0]
if read_element_idx != element_idx:
raise GmshFileFormatError("out-of-order node index found")
el_type_num = parts[1]
try:
element_type = \
receiver.gmsh_element_type_to_info_map[el_type_num]
except KeyError:
raise GmshFileFormatError("unexpected element type %d"
% el_type_num)
tag_count = parts[2]
tags = parts[3:3+tag_count]
# convert to zero-based
node_indices = np.array(
[x-1 for x in parts[3+tag_count:]], dtype=np.intp)
if element_type.node_count() != len(node_indices):
raise GmshFileFormatError(
"unexpected number of nodes in element")
gmsh_vertex_nrs = node_indices[:element_type.vertex_count()]
zero_based_idx = element_idx - 1
tag_numbers = [tag for tag in tags[:1] if tag != 0]
receiver.add_element(element_nr=zero_based_idx,
element_type=element_type, vertex_nrs=gmsh_vertex_nrs,
lexicographic_nodes=node_indices[
element_type.get_lexicographic_gmsh_node_indices()],
tag_numbers=tag_numbers)
element_idx += 1
if element_count+1 != element_idx:
raise GmshFileFormatError("unexpected number of elements found")
receiver.finalize_elements()
elif section_name == "PhysicalNames":
name_count = int(feeder.get_next_line())
name_idx = 1
while True:
next_line = feeder.get_next_line()
if next_line == "$End"+section_name:
break
dimension, number, name = next_line.split(" ", 2)
dimension = int(dimension)
number = int(number)
if not name[0] == '"' or not name[-1] == '"':
raise GmshFileFormatError("expected quotes around physical name")
receiver.add_tag(name[1:-1], number, dimension)
name_idx += 1
if name_count+1 != name_idx:
raise GmshFileFormatError(
"unexpected number of physical names found")
receiver.finalize_tags()
else:
# unrecognized section, skip
from warnings import warn
warn("unrecognized section '%s' in gmsh file" % section_name)
while True:
next_line = feeder.get_next_line()
if next_line == "$End"+section_name:
break
# }}}
# vim: fdm=marker
|
# Copyright 2021 Hakan Kjellerstrand hakank@gmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Costas array in OR-tools CP-SAT Solver.
From http://mathworld.wolfram.com/CostasArray.html:
'''
An order-n Costas array is a permutation on {1,...,n} such
that the distances in each row of the triangular difference
table are distinct. For example, the permutation {1,3,4,2,5}
has triangular difference table {2,1,-2,3}, {3,-1,1}, {1,2},
and {4}. Since each row contains no duplications, the permutation
is therefore a Costas array.
'''
Also see
http://en.wikipedia.org/wiki/Costas_array
About this model:
This model is based on Barry O'Sullivan's model:
http://www.g12.cs.mu.oz.au/mzn/costas_array/CostasArray.mzn
and my small changes in
http://hakank.org/minizinc/costas_array.mzn
Since there is no symmetry breaking of the order of the Costas
array it gives all the solutions for a specific length of
the array, e.g. those listed in
http://mathworld.wolfram.com/CostasArray.html
1 1 (1)
2 2 (1, 2), (2,1)
3 4 (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2)
4 12 (1, 2, 4, 3), (1, 3, 4, 2), (1, 4, 2, 3), (2, 1, 3, 4),
(2, 3, 1, 4), (2, 4, 3, 1), (3, 1, 2, 4), (3, 2, 4, 1),
(3, 4, 2, 1), (4, 1, 3, 2), (4, 2, 1, 3), (4, 3, 1, 2)
....
See http://www.research.att.com/~njas/sequences/A008404
for the number of solutions for n=1..
1, 2, 4, 12, 40, 116, 200, 444, 760, 2160, 4368, 7852, 12828,
17252, 19612, 21104, 18276, 15096, 10240, 6464, 3536, 2052,
872, 200, 88, 56, 204,...
This is a port of my old CP solver model costas_array.py
This model was created by Hakan Kjellerstrand (hakank@gmail.com)
Also see my other OR-tools models: http://www.hakank.org/or_tools/
"""
from __future__ import print_function
from ortools.sat.python import cp_model as cp
import math, sys
# from cp_sat_utils import *
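# Illustrative sketch (added for clarity, not part of the original model):
# compute the triangular difference table for a permutation and check that
# every row has distinct entries, as in the Costas definition quoted above.
def _is_costas(perm):
    n_ = len(perm)
    for row in range(1, n_):
        diffs = [perm[j] - perm[j - row] for j in range(row, n_)]
        if len(set(diffs)) != len(diffs):
            return False
    return True
# e.g. _is_costas([1, 3, 4, 2, 5]) is True, matching the MathWorld example.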
class SolutionPrinter(cp.CpSolverSolutionCallback):
"""SolutionPrinter"""
def __init__(self, costas, differences):
cp.CpSolverSolutionCallback.__init__(self)
self.__costas = costas
self.__differences = differences
self.__solution_count = 0
def OnSolutionCallback(self):
self.__solution_count += 1
print("costas:", [self.Value(self.__costas[i]) for i in range(n)])
print("differences:")
for i in range(n):
for j in range(n):
v = self.Value(self.__differences[i, j])
if v == -n + 1:
print(" ", end=" ")
else:
print("%2d" % v, end=" ")
print()
print()
def SolutionCount(self):
return self.__solution_count
def main(n=6):
model = cp.CpModel()
#
# data
#
print("n:", n)
#
# declare variables
#
costas = [model.NewIntVar(1, n, "costas[%i]" % i) for i in range(n)]
differences = {}
for i in range(n):
for j in range(n):
differences[(i, j)] = model.NewIntVar(-n + 1, n - 1,
"differences[%i,%i]" % (i, j))
#
# constraints
#
# Fix the values in the lower triangle in the
# difference matrix to -n+1. This removes variants
    # of the difference matrix for the same Costas array.
for i in range(n):
for j in range(i + 1):
model.Add(differences[i, j] == -n + 1)
# hakank: All the following constraints are from
    # Barry O'Sullivan's original model.
#
model.AddAllDifferent(costas)
# "How do the positions in the Costas array relate
# to the elements of the distance triangle."
for i in range(n):
for j in range(n):
if i < j:
model.Add(differences[(i, j)] == costas[j] - costas[j - i - 1])
# "All entries in a particular row of the difference
    # triangle must be distinct."
for i in range(n - 2):
model.AddAllDifferent([differences[i, j] for j in range(n) if j > i])
#
# "All the following are redundant - only here to speed up search."
#
# "We can never place a 'token' in the same row as any other."
for i in range(n):
for j in range(n):
if i < j:
model.Add(differences[i, j] != 0)
for k in range(2, n):
for l in range(2, n):
if k < l:
model.Add(differences[k - 2, l - 1] + differences[k, l] ==
differences[k - 1, l - 1] + differences[k - 1, l])
#
# search and result
#
solver = cp.CpSolver()
# status = solver.Solve(model)
solution_printer = SolutionPrinter(costas, differences)
status = solver.SearchForAllSolutions(model,solution_printer)
if status == cp.OPTIMAL:
print("costas:", [solver.Value(costas[i]) for i in range(n)])
print("differences:")
for i in range(n):
for j in range(n):
v = solver.Value(differences[i, j])
if v == -n + 1:
print(" ", end=" ")
else:
print("%2d" % v, end=" ")
print()
print()
print()
print("NumSolutions:", solution_printer.SolutionCount())
print("NumConflicts:", solver.NumConflicts())
print("NumBranches:", solver.NumBranches())
print("WallTime:", solver.WallTime())
n = 6
if __name__ == "__main__":
if len(sys.argv) > 1:
n = int(sys.argv[1])
main(n)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-12 20:29
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0006_auto_20200212_1151'),
]
operations = [
migrations.AddField(
model_name='publication',
name='booktitle',
field=models.CharField(max_length=500, null=True),
),
migrations.AddField(
model_name='publication',
name='publisher',
field=models.CharField(max_length=500, null=True),
),
migrations.AlterField(
model_name='publication',
name='journal',
field=models.CharField(max_length=500, null=True),
),
]
|
import flask
import logging
from flask import Flask, send_from_directory, render_template, jsonify, url_for, request, redirect
from dirToPod import RssGenerator
from reverse_proxied import ReverseProxied
from eyed3 import id3
from random import random
from logging.handlers import RotatingFileHandler
import shutil
import os
import subprocess
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
SERVER_ROOT = '/var/www/books'
AUDIOBOOK_DIRECTORY = '/data/audiobooks'
LOGSIZE = 1000000 # 1MB
# Setup simple logging
log = logging.Logger('appLog')
log.addHandler(RotatingFileHandler('/tmp/dirToPod.log', maxBytes=LOGSIZE, backupCount=1))
@app.route("/")
def index():
pageText = ''
for item in sorted(os.listdir(AUDIOBOOK_DIRECTORY)):
if not item.startswith('.'):
pageText += '<p><a href="%s">%s</a></p>' % (url_for('getRss', path=item.replace(' ', '_')), item)
return pageText
@app.route("/audiobook/<path:path>")
def getRss(path):
# if not os.path.exists("%s/%s.xml" % (SERVER_ROOT, path)):
RssGenerator('http://{0}{1}'.format(request.host, url_for('index')[:-1]), "%s/%s" % (AUDIOBOOK_DIRECTORY, path.replace('_',' ')), path)
return send_from_directory(SERVER_ROOT, "%s.xml" % path)
#return redirect("http://%s/%s.xml" % (request.headers['Host'], path))
@app.route("/files")
def files():
return render_template('files.html')
@app.route("/api/joinFiles", methods=['POST'])
def joinFiles():
newName = request.json['newName']
fileList = request.json['fileList']
newName = newName + '.mp3' if not newName.endswith('.mp3') else newName
cwd = fileList[0] if os.path.isdir(fileList[0]) else os.path.dirname(fileList[0])
filesToJoin = [aFile for aFile in fileList if aFile.endswith('.mp3')]
    for aFile in filesToJoin:
        id3.tag.Tag.remove(aFile)
filesToJoin = sorted([os.path.relpath(aFile, cwd) for aFile in filesToJoin])
args = ['/usr/bin/ffmpeg', '-i', 'concat:%s' % '|'.join(filesToJoin), '-y', '-acodec', 'copy', newName]
runProcess(args, cwd)
args = ['/usr/bin/vbrfix', '-always', newName, newName]
runProcess(args, cwd)
    # clean up leftover files from vbrfix
    leftoverCrap = [os.path.join(cwd, crapFile) for crapFile in ['vbrfix.log', 'vbrfix.tmp'] if os.path.isfile(os.path.join(cwd, crapFile))]
    for crap in leftoverCrap:
        os.remove(crap)
return ""
@app.route("/api/hierarchy", methods=['GET'])
def getHierarchy():
def addEntries(entryList, icon):
for entry in entryList:
data.append({ 'id' : os.path.join(root, entry), 'text' : entry, 'parent' : root, 'icon' : icon})
data = [{'id' : AUDIOBOOK_DIRECTORY, 'text' : 'audiobooks', 'parent' : '#'}]
for root,dirs,files in os.walk(AUDIOBOOK_DIRECTORY):
files = [f for f in files if not f[0] == '.']
dirs[:] = [d for d in dirs if not d[0] == '.']
addEntries(dirs, 'jstree-folder')
addEntries(files, 'jstree-file')
return jsonify(data=data)
@app.route('/api/delete', methods=['POST'])
def delete():
    if 'nodeList' in request.json:
        nodeList = request.json['nodeList']
    else:
        nodeList = [request.json['node']]
for node in nodeList:
if os.path.isdir(node):
shutil.rmtree(node)
elif os.path.isfile(node):
os.remove(node)
    # If it's not a file or directory, either it was already deleted along with its directory, or it's a symlink, which we ideally want to keep around
return ''
@app.route('/api/rename', methods=['POST'])
def rename():
oldPath = request.json['oldPath']
newPath = os.path.join(os.path.dirname(oldPath), request.json['newNode'])
if os.path.exists(newPath):
raise OSError('Path already exists!')
os.rename(oldPath, newPath)
return ''
@app.route('/api/move', methods=['POST'])
def move():
oldPath = request.json['oldPath']
dropTo = request.json['dropTo']
if os.path.isdir(dropTo):
newPath = dropTo
elif os.path.isfile(dropTo):
newPath = os.path.dirname(dropTo)
    else:
        log.debug('Trying to drop onto something that is neither a directory nor a file; aborting move')
        return ''
    newPath = newPath + os.path.sep + os.path.basename(oldPath)
if os.path.exists(newPath):
raise OSError('Path already exists!')
shutil.move(oldPath, newPath)
return ''
@app.route('/api/reencode', methods=['POST'])
def reencode():
fileList = request.json['fileList']
for aFile in fileList:
if aFile.endswith('.mp3'):
fileDir = os.path.dirname(aFile)
filename = os.path.basename(aFile)
oldDir = os.path.join(fileDir, 'old')
if not os.path.isdir(oldDir):
os.mkdir(oldDir)
os.rename(aFile, os.path.join(oldDir, filename))
args = ['/usr/bin/ffmpeg', '-i', os.path.join(oldDir, filename), '-ab', '96k', aFile]
runProcess(args, fileDir)
return ''
def runProcess(args, cwd):
log.debug(args)
stdout = '/tmp/stdout%s' % str(random()).split('.')[1]
with open(stdout, 'w') as out:
pobj = subprocess.Popen(args, stdout=out, stderr=subprocess.STDOUT, cwd=cwd)
ret = pobj.wait()
with open(stdout, 'r') as out:
log.debug(out.read())
os.remove(stdout)
if __name__ == "__main__":
app.run('0.0.0.0', port=8881, debug=True)
|
from pymongo import MongoClient
from data_prep.config import mongo, neo4j
from py2neo import Graph, Node, Relationship, authenticate
class DBCrawl:
def __init__(self):
self.db = mongo['db_name']
self.host = mongo['host']
self.port = mongo['port']
self.client = MongoClient(self.host, self.port)
authenticate(neo4j['host'] + ':' + neo4j['port'],
neo4j['username'], neo4j['password'])
self.graph = Graph()
    def _getCollections(self):
        # pymongo exposes collection names on the database object,
        # not via the mongo-shell-style getCollectionNames()
        return self.client[self.db].collection_names()
def _migrate_data(self, collection, neo4j_label):
'''
        :param collection: The collection object whose documents should be migrated
:param neo4j_label: The label to be used in Neo4j for this collection
'''
for item in collection.find({}):
try:
item.pop('_id', None)
node = Node(neo4j_label, **item)
self.graph.create(node)
except Exception as e:
print(str(e))
def migrate_relations_acquisition_company(self):
for acq in self.graph.find("Acquisition"):
comp = self.graph.find_one(
"Company", "name", acq.properties['acquirer_name'])
ac_to_comp = Relationship(acq, "details", comp)
self.graph.create(ac_to_comp)
def migrate_relations_company_acquisition(self):
for comp in self.graph.find("Company"):
acq = self.graph.find_one(
"Acquisition", "company_name", comp.properties['name'])
comp_to_ac = Relationship(comp, "acquisition", acq)
self.graph.create(comp_to_ac)
    def migrate_all_collections(self):
        # _getCollections() returns names, so look up each collection object
        for name in self._getCollections():
            collection = self.client[self.db][name]
            self._migrate_data(collection, name)
        self.migrate_relations_company_acquisition()
        self.migrate_relations_acquisition_company()
|
import numpy as np
from utils.test_env import EnvTest
class LinearSchedule(object):
def __init__(self, eps_begin, eps_end, nsteps):
"""
Args:
eps_begin: initial exploration
eps_end: end exploration
nsteps: number of steps between the two values of eps
"""
self.epsilon = eps_begin
self.eps_begin = eps_begin
self.eps_end = eps_end
self.nsteps = nsteps
def update(self, t):
"""
Updates epsilon
Args:
t: int
frame number
"""
if t > self.nsteps:
self.epsilon = self.eps_end
else:
alpha = (1.0 * t) / self.nsteps
self.epsilon = (alpha * self.eps_end) \
+ ((1-alpha) * self.eps_begin)
class LinearExploration(LinearSchedule):
def __init__(self, env, eps_begin, eps_end, nsteps):
"""
Args:
env: gym environment
eps_begin: float
initial exploration rate
eps_end: float
final exploration rate
nsteps: int
number of steps taken to linearly decay eps_begin to eps_end
"""
self.env = env
super(LinearExploration, self).__init__(eps_begin, eps_end, nsteps)
def get_action(self, best_action):
"""
Returns a random action with prob epsilon, otherwise returns the best_action
Args:
best_action: int
best action according some policy
Returns:
an action
"""
##############################################################
if np.random.uniform(0, 1) < self.epsilon:
return self.env.action_space.sample()
else:
return best_action
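# Minimal usage sketch (added for illustration): epsilon decays linearly from
# eps_begin to eps_end over nsteps calls to update(), then stays at eps_end.
if __name__ == "__main__":
    sched = LinearSchedule(eps_begin=1.0, eps_end=0.1, nsteps=10)
    for t in (0, 5, 10, 20):
        sched.update(t)
        print(t, sched.epsilon)  # 1.0, 0.55, 0.1, 0.1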
|
from collections import defaultdict
from ipaddress import ip_address, IPv4Address, IPv6Address
from typing import Dict, Union, List
from twisted.internet import defer
from twisted.names import dns
from twisted.python.failure import Failure
from dnsagent import logger
from dnsagent.resolver.base import BaseResolver
from dnsagent.utils import watch_modification
__all__ = ('HostsResolver',)
Name2IpListType = Dict[str, List[Union[IPv4Address, IPv6Address]]]
def validate_domain_name(name: str):
# TODO:
# name = name.encode('utf-8').decode('idna').lower()
return True
def parse_hosts_file(lines) -> Name2IpListType:
def bad_line(lineno, line):
logger.error('bad host file. line %d, %r', lineno, line)
name2ip = defaultdict(list)
for lineno, line in enumerate(lines):
line = line.partition('#')[0].strip()
if line:
# TODO: distinguish between canonical name and aliases
ip, *domains = line.split()
if not domains:
bad_line(lineno, line)
continue
try:
ipobj = ip_address(ip)
except ValueError:
bad_line(lineno, line)
continue
for do in domains:
name = do.lower()
if not validate_domain_name(name):
logger.error('bad domain. line %d, domain %r', lineno, name)
continue
if ipobj not in name2ip[name]:
name2ip[name].append(ipobj)
return dict(name2ip)
def read_hosts_file(filename: str):
with open(filename, 'rt') as fp:
return parse_hosts_file(fp)
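# Illustrative sketch (not part of the original module): parse_hosts_file works
# on any iterable of lines, so an in-memory hosts snippet can be parsed directly.
def _example_parse_hosts():
    lines = [
        "127.0.0.1   localhost     # loopback",
        "::1         localhost",
        "10.0.0.5    printer.lan printer",
    ]
    return parse_hosts_file(lines)
    # => {'localhost': [IPv4Address('127.0.0.1'), IPv6Address('::1')],
    #     'printer.lan': [IPv4Address('10.0.0.5')],
    #     'printer': [IPv4Address('10.0.0.5')]}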
class HostsResolver(BaseResolver):
"""
A resolver that services hosts(5) format files.
ref: twisted.names.hosts.Resolver
"""
def __init__(self, *, filename=None, mapping=None, ttl=60*60, reload=False):
super().__init__()
self.filename = filename
self.ttl = ttl
if filename is not None:
assert mapping is None
self._load_hosts()
if reload:
watch_modification(filename, self._load_hosts)
elif mapping is not None:
self.name2iplist = dict() # type: Name2IpListType
for domain, value in mapping.items():
if isinstance(value, str):
value = [value]
self.name2iplist[domain.lower()] = [ip_address(ip) for ip in value]
def _load_hosts(self):
logger.debug('loading hosts file: %s', self.filename)
self.name2iplist = read_hosts_file(self.filename)
_ipversion_to_dns_type = {
4: dns.A, 6: dns.AAAA,
}
_ipversion_to_record_type = {
4: dns.Record_A, 6: dns.Record_AAAA,
}
def _get_records(self, name: Union[str, bytes], ip_versions):
if isinstance(name, bytes):
name_str, name_bytes = name.decode('ascii').lower(), name
else:
name_str, name_bytes = name, name.encode('idna')
return tuple(
dns.RRHeader(
name_bytes, self._ipversion_to_dns_type[addr.version], dns.IN, self.ttl,
self._ipversion_to_record_type[addr.version](addr.exploded, self.ttl),
)
for addr in self.name2iplist.get(name_str, [])
if addr.version in ip_versions
)
def _respond(self, name, records, **kwargs):
"""
Generate a response for the given name containing the given result
records, or a failure if there are no result records.
@param name: The DNS name the response is for.
@type name: C{str}
@param records: A tuple of L{dns.RRHeader} instances giving the results
that will go into the response.
@return: A L{Deferred} which will fire with a three-tuple of result
records, authority records, and additional records, or which will
fail with L{dns.DomainError} if there are no result records.
"""
if records:
logger.info('[%d]answer from hosts: %r', kwargs.get('request_id', -1), records)
return defer.succeed((records, (), ()))
else:
return defer.fail(Failure(dns.DomainError(name)))
def lookupAddress(self, name, timeout=None, **kwargs):
"""
Return any IPv4 addresses from C{self.name2ip} as L{Record_A} instances.
"""
return self._respond(name, self._get_records(name, {4}), **kwargs)
def lookupIPV6Address(self, name, timeout=None, **kwargs):
"""
Return any IPv6 addresses from C{self.name2ip} as L{Record_AAAA} instances.
"""
return self._respond(name, self._get_records(name, {6}), **kwargs)
def lookupAllRecords(self, name, timeout=None, **kwargs):
"""
Return any addresses from C{self.name2ip} as either
L{Record_AAAA} or L{Record_A} instances.
"""
return self._respond(name, self._get_records(name, {4, 6}), **kwargs)
def lookupPointer(self, name, timeout=None, **kwargs):
# TODO: ptr
return defer.fail(NotImplementedError("HostsResolver.lookupPointer"))
def __repr__(self):
return '<Hosts: {}>'.format(self.filename)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#******************************************************************************
#
# mask_all.py
# ---------------------------------------------------------
# Apply raster B to raster A to add 0 where raster A values are nodata or missing (but exist in raster B)
# More: http://github.com/nextgis/dhi
#
# Usage:
# mask_all.py [-h] [-of OUTPUT_FOLDER] [-m MASK] [-o] (-rs INPUT_RASTERS | -if INPUT_FOLDER)
# where:
# -h show this help message and exit
# -rs INPUT_RASTERS input GeoTIFF rasters to be masked, separated by comma
# -if INPUT_FOLDER input folder of GeoTIFF rasters to be masked
# -of OUTPUT_FOLDER output folder, if missing input(s) will be overwritten
# -m MASK raster to be used as a binary mask
# -o overwrite outputs
#
# Example:
# python mask_all.py -if x:\MOD13A2\2002\tif-evi-qa -of x:\MOD13A2\2002\tif-evi-qa-mask
#
# Copyright (C) 2015 Maxim Dubinin (sim@gis-lab.info), Alexander Muriy (amuriy AT gmail.com)
#
# This source is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This code is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# A copy of the GNU General Public License is available on the World Wide Web
# at <http://www.gnu.org/copyleft/gpl.html>. You can also obtain it by writing
# to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
# MA 02111-1307, USA.
#
#******************************************************************************
import os
import sys
import shutil
import argparse
import glob
parser = argparse.ArgumentParser()
parser.add_argument('-if','--input_folder', help='Input folder with GeoTIFF(s)')
parser.add_argument('-rs','--input_rasters', help='Input GeoTIFF(s) separated by comma')
parser.add_argument('-of','--output_folder', help='Output folder, if missing input(s) will be overwritten')
parser.add_argument('-m','--mask', help='Path to the raster which will be used as a binary mask')
parser.add_argument('-o','--overwrite', action="store_true", help='Overwrite outputs')
args = parser.parse_args()
def sanitize(folder):
if not folder.endswith('\\'): folder = folder + '\\'
return folder
if __name__ == '__main__':
if not args.input_folder:
print('Please select the input folder')
sys.exit(1)
od = ''
if args.output_folder: od = sanitize(args.output_folder)
if args.input_folder:
id = sanitize(args.input_folder)
os.chdir(args.input_folder)
if args.input_rasters:
inputs = args.input_rasters.split(',')
inputs = [id + os.sep + x for x in inputs]
else:
inputs = glob.glob(id + '*.tif')
elif args.input_rasters and not args.input_folder:
inputs = args.input_rasters.split(',')
# gisbase = os.environ['GISBASE'] = "c:/OSGeo4W/apps/grass/grass-7.0.3/"
# gisdbase = os.environ['GISDBASE'] = "e:/users/maxim/thematic/dhi/"
# location = "dhi_grass"
# mapset = "gpp"
# sys.path.append(os.path.join(gisbase, "etc", "python"))
# import grass.script as grass
# import grass.script.setup as gsetup
# gsetup.init(gisbase, gisdbase, location, mapset)
# grass.run_command('r.in.gdal', input_ = 'y:/dhi/masks/Fpar_NoData_sin.tif', output = 'Fpar_NoData_sin_b', overwrite = True)
for input in inputs:
if os.path.exists(input):
input_name = os.path.basename(input).replace('.tif','')
print('Processing ' + input)
# grass.run_command('r.in.gdal', input_ = input, output = input_name, overwrite = True)
# grass.mapcalc(input_name + '_0 = if(isnull(' + input_name + '),Fpar_NoData_sin_b,' + input_name + ')', overwrite = True)
# grass.run_command('r.out.gdal', input_ = input_name + '_0', output = id + input_name + '_0.tif', overwrite = True)
cmd = 'gdal_calc.py --overwrite ' + ' -A ' + args.mask + ' -B ' + input + ' --outfile=' + od + input_name + '.tif' + ' --calc="A*B"' + ' --NoDataValue=255'
# cmd = 'gdal_calc.py --overwrite ' + ' -A y:/dhi/masks/Fpar_NoData_sin_b.tif ' + ' -B ' + id + input_name + '_0.tif' + ' --outfile=' + od + input_name + '.tif' + ' --calc="A*B" --NoDataValue=255' + ' --overwrite'
            print(cmd)
os.system(cmd)
|
def findSep(self):
text = self.text
data = list(text)
found="-1"
for x in data:
found=type(x)
if (found != "-1"):
break
return found
def type(i):
    # NOTE: this shadows the built-in type(); here it maps a character to the
    # name of the separator it represents, or "-1" if it is not a separator.
    switcher = {
        ' ': 'space',
        ',': 'comma',
        ';': 'semi-colon',
        ':': 'colon',
        '|': 'pipe',
        '.': 'period',
    }
    return switcher.get(i, "-1")
|
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 25 18:37:11 2020
@author: autol
"""
#%%
import numpy as np
import pandas as pd
df = pd.DataFrame({'a': np.random.randn(1000),
'b': np.random.randn(1000),
'N': np.random.randint(100, 1000, (1000)),
'x': 'x'})
def f(x):
return x * (x - 1)
def integrate_f(a, b, N):
s = 0
dx = (b - a) / N
for i in range(N):
s += f(a + i * dx)
return s * dx
#%%
%timeit df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1)
%prun -l 4 df.apply(lambda x: integrate_f(x['a'], x['b'], x['N']), axis=1)
#%%
%load_ext Cython
#%%
%%cython
def f_plain(x):
return x * (x - 1)
def integrate_f_plain(a, b, N):
s = 0
dx = (b - a) / N
for i in range(N):
s += f_plain(a + i * dx)
return s * dx
#%%
%timeit df.apply(lambda x: integrate_f_plain(x['a'], x['b'], x['N']), axis=1)
|
#!/usr/bin/python
#
# Copyright 2019 Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
def main(argv):
pass
if __name__ == '__main__':
main(sys.argv)
class Dockerfile:
def __init__(self, from_image, commands):
self.from_image = "FROM {}\n".format(from_image)
self.commands = commands
self.layers = []
def create(self):
for command in self.commands:
if command.required_instruction == "copy":
for line in command.get_lines():
self.layers.append(DockerLayer(command.required_instruction))
self.layers[-1].append(line)
else:
if len(self.layers) == 0 or not self.layers[-1].supports_command_instruction(command.required_instruction):
self.layers.append(DockerLayer(command.required_instruction))
for line in command.get_lines():
self.layers[-1].append(line)
def to_string(self):
output = self.from_image
for layer in self.layers:
output += layer.get_layer_as_string()
return output
def add_welcome_message(self, message):
header="This container comes with the following preinstalled tools:\\n\\"
self.layers.append(DockerLayer("run", False))
self.layers[-1].append(
"""echo '[ ! -z "$TERM" -a -r /etc/motd ] && cat /etc/issue && cat /etc/motd' >> /etc/bash.bashrc; echo "\\\n{}{}" > /etc/motd"""
.format(header, message))
class DockerLayer:
def __init__(self, instruction, reindent_string_output=True):
self.instruction = instruction
self.commands = []
self.reindent_string_output = reindent_string_output
def supports_command_instruction(self, instruction):
if instruction == "copy":
return False
else:
return self.instruction == instruction
def append(self, command):
if len(self.commands) != 0:
self.commands[-1] += ";\\\n"
self.commands.append(command)
def get_layer_as_string(self):
output = self.instruction + " "
for command in self.commands:
output += command
if self.reindent_string_output:
return self.reindent(output)
return output
@staticmethod
def reindent(command):
command_lines = command.split('\n')
command_lines = command_lines[:1] + [(4 * ' ') + command_line.lstrip() for command_line in command_lines[1:]]
return '\n'.join(command_lines) + '\n'
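# Hedged usage sketch (added for illustration): the real command objects are
# defined elsewhere, so _ExampleCommand is a hypothetical stand-in exposing the
# two attributes Dockerfile.create() relies on: required_instruction and
# get_lines().
class _ExampleCommand:
    def __init__(self, required_instruction, lines):
        self.required_instruction = required_instruction
        self._lines = lines

    def get_lines(self):
        return self._lines


def _example_dockerfile():
    dockerfile = Dockerfile("ubuntu:20.04", [
        _ExampleCommand("run", ["apt-get update", "apt-get install -y curl"]),
        _ExampleCommand("copy", ["app/ /opt/app/"]),
    ])
    dockerfile.create()
    dockerfile.add_welcome_message("curl\\n")
    return dockerfile.to_string()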
|
from typing import Dict, List, Any, Optional
import timm
import torch
import torch.nn as nn
class BaseTimmModel(nn.Module):
"""Convolution models from timm
name: `str`
timm model name
num_classes: `int`
number of classes
from_pretrained: `bool`
whether to use timm pretrained
classnames: `Optional[List]`
list of classnames
"""
def __init__(
self,
name: str,
num_classes: int = 1000,
from_pretrained: bool = True,
classnames: Optional[List] = None,
**kwargs
):
super().__init__()
self.name = name
self.classnames = classnames
if num_classes != 1000:
self.model = timm.create_model(name, pretrained=from_pretrained, num_classes=num_classes)
else:
self.model = timm.create_model(name, pretrained=from_pretrained)
def get_model(self):
"""
Return the full architecture of the model, for visualization
"""
return self.model
def forward(self, x: torch.Tensor):
outputs = self.model(x)
return outputs
def get_prediction(self, adict: Dict[str, Any], device: torch.device):
"""
Inference using the model.
adict: `Dict[str, Any]`
dictionary of inputs
device: `torch.device`
current device
"""
inputs = adict['inputs'].to(device)
outputs = self.model(inputs)
probs, outputs = torch.max(torch.softmax(outputs, dim=1), dim=1)
probs = probs.cpu().detach().numpy()
classids = outputs.cpu().detach().numpy()
if self.classnames:
classnames = [self.classnames[int(clsid)] for clsid in classids]
else:
classnames = []
return {
'labels': classids,
'confidences': probs,
'names': classnames,
}
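# Hedged usage sketch (illustrative only): "resnet18" is just an example timm
# model name and the random batch stands in for real preprocessed images.
if __name__ == "__main__":
    model = BaseTimmModel(
        "resnet18", num_classes=3, from_pretrained=False,
        classnames=["cat", "dog", "bird"])
    model.eval()
    batch = {"inputs": torch.randn(2, 3, 224, 224)}
    print(model.get_prediction(batch, torch.device("cpu")))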
|
"""
Given an account name and a shard_id, make a request to the bulk stash tab API of Path of Exile
http://api.pathofexile.com/public-stash-tabs
If the account is not found in the payload, update the scheduler with the next invocation and shardId and return,
if it is, then update the account in the database and return
"""
import requests
import json
POE_URL = "http://api.pathofexile.com/public-stash-tabs"
def hello(event, context):
r = requests.get(url=POE_URL)
body = json.loads(r.text)
matches = filter(lambda x: x["accountName"] == event["account"], body["stashes"])
count = sum(1 for _ in matches)
return "Found " + str(count) + " matches for account name " + event["account"]
|
"""Creates a table of transfer functions for a StateSpace (MIMO) model."""
import controlSBML.constants as cn
import controlSBML as ctl
from controlSBML.option_management.option_manager import OptionManager
import control
from docstring_expander.expander import Expander
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
NUM_FREQ = 100  # Number of frequencies in radians/sec
FREQ_RNG = [1e-2, 1e2]
LOW = 0
HIGH = 1
class StateSpaceTF(object):
def __init__(self, mimo_sys, input_names=None, output_names=None):
"""
Parameters
----------
mimo_sys: control.StateSpace
control.StateSpace
input_names: list-str
names of the inputs
output_names: list-str
names of the outputs
"""
self.dataframe = self.ss2tf(mimo_sys, input_names=input_names,
output_names=output_names)
self.input_names = list(self.dataframe.columns)
self.output_names = list(self.dataframe.index)
self.num_state, self.num_input, self.num_output = self.getSystemShape(
mimo_sys)
def __str__(self):
stgs = ["(input, output)\n\n"]
indents = " "
for inp in self.dataframe.columns:
for out in self.dataframe.index:
pfx = "(%s, %s): " % (inp, out)
                stg = str(self.dataframe.loc[out, inp])[:-1]  # Exclude nl
stg = stg.replace("\n", "", 1)
stg = stg.replace("\n", "\n" + indents)
stgs.append(pfx + stg + "\n")
return ("\n").join(stgs)
@staticmethod
def getSystemShape(sys):
"""
Provides the number of states, number of inputs, and number of outputs.
Parameters
----------
sys: control.StateSpace
Returns
-------
int: num states
int: num inputs
int: num outputs
"""
return sys.nstates, sys.ninputs, sys.noutputs
@classmethod
def ss2tf(cls, mimo_sys, input_names=None, output_names=None):
"""
        Creates a dataframe of transfer functions for each combination
of inputs and outputs.
Parameters
----------
mimo_sys: control.StateSpace
Returns
-------
DataFrame
column: input
index: output
value: control.TransferFunction
"""
num_state, num_input, num_output = cls.getSystemShape(mimo_sys)
A_mat = mimo_sys.A
if input_names is None:
input_names = [str(n) for n in range(1, num_input+1)]
else:
input_names = list(input_names)
if output_names is None:
output_names = [str(n) for n in range(1, num_output+1)]
else:
output_names = list(output_names)
# Construct matrices for a 1-input, 1-output state space model
B_base_mat = np.reshape(np.repeat(0, num_state), (num_state, 1))
C_base_mat = np.reshape(np.repeat(0, num_state), (1, num_state))
D_mat = np.repeat(0, 1)
# Construct the dataframe entries
dct = {n: [] for n in output_names}
for out_idx, output_name in enumerate(output_names):
for inp_idx, input_name in enumerate(input_names):
# Construct the SISO system
input_name = input_names[inp_idx]
B_mat = np.array(B_base_mat)
B_mat[inp_idx, 0] = 1
C_mat = np.array(C_base_mat)
C_mat[0, out_idx] = 1
new_sys = control.StateSpace(A_mat, B_mat, C_mat, D_mat)
siso_tf = control.ss2tf(new_sys)
dct[output_name].append(siso_tf)
#
df = pd.DataFrame(dct).transpose()
df.index = output_names
df.columns = input_names
df.index.name = "Outputs"
df.columns.name = "Inputs"
return df
@Expander(cn.KWARGS, cn.ALL_KWARGS)
def plotBode(self, is_magnitude=True, is_phase=True, **kwargs):
"""
        Constructs Bode plots for a MIMO system. This is done by constructing
        n*n SISO systems, where n is the number of states.
Parameters
----------
is_magnitude: bool
Do magnitude plots
is_phase: bool
Do phase plots
is_plot: bool
Display plots
#@expand
"""
        # Calculate magnitudes and phases for all inputs and outputs
freq_arr = np.array(range(NUM_FREQ))
delta = (FREQ_RNG[HIGH] - FREQ_RNG[LOW])/NUM_FREQ
freq_arr = (freq_arr + FREQ_RNG[LOW])*delta
mgr = OptionManager(kwargs)
legends = []
for out_idx, out_name in enumerate(self.output_names):
for inp_idx, inp_name in enumerate(self.input_names):
siso_tf = self.dataframe.loc[out_name, inp_name]
# Create the plot data
_ = control.bode(siso_tf, freq_arr)
# Construct the plot
legend =" %s->%s" % (inp_name, out_name)
legends.append(legend)
mgr.plot_opts.set(cn.O_LEGEND_SPEC, default=ctl.LegendSpec(legends,
crd=mgr.plot_opts[cn.O_LEGEND_CRD]))
mgr.doPlotOpts()
mgr.doFigOpts()
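# Hedged usage sketch (illustrative only): tabulate the SISO transfer functions
# of a small, hand-written 2-input/2-output state space model.
if __name__ == "__main__":
    A = [[-1.0, 0.0], [0.0, -2.0]]
    B = [[1.0, 0.0], [0.0, 1.0]]
    C = [[1.0, 0.0], [0.0, 1.0]]
    D = [[0.0, 0.0], [0.0, 0.0]]
    mimo = control.StateSpace(A, B, C, D)
    tf_table = StateSpaceTF(mimo, input_names=["u1", "u2"],
                            output_names=["y1", "y2"])
    # Transfer function from input u1 to output y1 (expected: 1/(s + 1))
    print(tf_table.dataframe.loc["y1", "u1"])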
|
# using nltk vader built in lexicon classifier because generating dataset for
# classifier myself would take too long, might create custom dataset tho
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import constants
financial_word_classification = {
"bullish": 1,
"bearish": -1,
"volatile": -0.3,
"risen": 0.5,
"fell": -0.5,
"growth": 0.5,
"rally": 0.5,
"buy": 0.7,
"sell": -0.7,
} # add other words
nltk.download("vader_lexicon")
sentiment_polarity_analyzer = SentimentIntensityAnalyzer()
sentiment_polarity_analyzer.lexicon.update(financial_word_classification)
def sentiment_analyzer(sentences):
summation = 0
number_of_neutral = 0
for sentence in sentences:
sentence_score = sentiment_polarity_analyzer.polarity_scores(sentence)["compound"]
summation += sentence_score
if sentence_score == 0:
number_of_neutral += 1
if len(sentences) >= constants.ARTICLE_MIN_COUNT_NEWS:
return summation / (len(sentences) - number_of_neutral / 2)
return 0
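# Illustrative usage (hypothetical headlines). Note that the averaging branch
# only applies once len(sentences) reaches constants.ARTICLE_MIN_COUNT_NEWS;
# shorter lists return 0 by design.
if __name__ == "__main__":
    headlines = [
        "Shares rally as the outlook turns bullish",
        "The stock fell sharply on bearish guidance",
    ]
    print(sentiment_analyzer(headlines))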
|
from shelf.metadata.result import Result
from shelf.metadata.error_code import ErrorCode
import copy
class Manager(object):
"""
Responsible for maintaining metadata integrity between multiple
places. Not only should the metadata match but it also needs to
be initialized with some values if it doesn't already exist.
Although they should always match, the cloud storage is our
source of truth.
"""
def __init__(self, container):
self.container = container
self.update_manager = self.container.update_manager
self.identity = self.container.resource_identity
self.portal = self.container.bucket_container.cloud_portal
self.initializer = self.container.bucket_container.initializer
self._metadata = None
@property
def metadata(self):
"""
Can be used like a dict
Returns shelf.metadata.wrapper.Wrapper
"""
if not self._metadata:
data = self.load()
self._metadata = self.container.create_wrapper(data)
return self._metadata
def _notify_updated_hook(self):
self.container.hook_manager.notify_metadata_updated(self.container.resource_identity)
def load(self):
"""
Loads metadata from the cloud.
Returns
dict
"""
data = self.portal.load(self.identity.cloud_metadata)
if self.initializer.needs_update(data):
data = self.initializer.update(self.identity, data)
self.portal.update(self.identity.cloud_metadata, data)
return data
def write(self):
"""
Updates the cloud to contain the metadata set on this instance.
"""
self.portal.update(self.identity.cloud_metadata, self.metadata)
formatted_metadata = self.container.mapper.to_response(self.metadata)
self.update_manager.update(self.identity.search, formatted_metadata)
def try_update(self, data):
"""
Overwrites the metadata with the data provided. The only
caveat is that if you try to set metadata that is immutable
it will be ignored.
Args:
data(schemas/metadata.json)
Returns:
shelf.metadata.result.Result
"""
data = self.container.mapper.from_response(data)
old_meta = copy.deepcopy(self.metadata)
for key, val in old_meta.iteritems():
new_meta = data.get(key)
if new_meta:
if not self.metadata.is_immutable(key):
self.metadata[key] = new_meta
data.pop(key)
else:
if not self.metadata.is_immutable(key):
del self.metadata[key]
if len(data) > 0:
self.metadata.update(data)
# assuming success if it hasn't thrown an exception
self.write()
self._notify_updated_hook()
return Result()
def try_update_property(self, key, value):
"""
Updates a single metadata property
Args:
key(string)
value(schemas/metadata-property.json)
Returns:
shelf.metadata.result.Result
"""
result = Result()
result = self._try_update_property_with_result(key, value, result)
return result
def try_create_property(self, key, value):
"""
Creates a single metadata property. Will error if the
property already exists.
Args:
key(string)
value(schemas/metadata-property.json)
Returns:
shelf.metadata.result.Result
"""
result = Result()
if self.metadata.get(key):
result.add_error(ErrorCode.DUPLICATE)
else:
result = self._try_update_property_with_result(key, value, result)
return result
def try_delete_property(self, key):
"""
Deletes a single metadata property.
Args:
key(string): Name of the metadata property
Returns:
shelf.metadata.result.Result
"""
result = Result()
if self.metadata.is_immutable(key):
result.add_error(ErrorCode.IMMUTABLE)
else:
del self.metadata[key]
self.write()
self._notify_updated_hook()
return result
def _try_update_property_with_result(self, key, value, result):
if not self.metadata.is_immutable(key):
value = self.container.mapper.from_response_property(value)
self.metadata[key] = value
self.write()
result.value = value
self._notify_updated_hook()
else:
result.add_error(ErrorCode.IMMUTABLE)
return result
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#http://stackoverflow.com/questions/4629595/using-pysides-qtwebkit-under-windows-with-py2exe
#http://qt-project.org/wiki/Packaging_PySide_applications_on_Windows
__author__ = 'joseph'
from distutils.core import setup
import py2exe
import sys
# If run without args, build executables, in quiet mode.
if len(sys.argv) == 1:
sys.argv.append("py2exe")
sys.argv.append("-q")
RT_MANIFEST = 24
INCLUDES = ["encodings","encodings.*","PySide.QtNetwork"]
options = {"py2exe" :
{"compressed" : 1,
"optimize" : 2,
"bundle_files" : 3,
"includes" : INCLUDES,
"excludes" : [],
"dll_excludes": [ ] }}
windows = [{"script": "editor.py",
"icon_resources": [],
}]
setup(name = "PtEditor",
version = "1.0",
description = "PtEditor",
author = "joseph",
author_email ="",
maintainer = "",
maintainer_email = "",
license = "Licence",
url = "",
data_files = [],
#zipfile=None,
options = options,
windows = windows,
)
|
import wx
import vdesk
import win32con
import sys
import movementindicator
class Model(object):
def __init__(self):
pass
def shortcut_data(self):
def generate_shortcut(Modifiers, Key, Function):
return {
"Modifiers": Modifiers,
"Key": Key,
"Function": Function
}
return (
generate_shortcut(
win32con.MOD_ALT | win32con.MOD_WIN,
win32con.VK_RIGHT,
                lambda Controller: Controller.display_next_desktop()
),
generate_shortcut(
win32con.MOD_ALT | win32con.MOD_WIN,
win32con.VK_LEFT,
                lambda Controller: Controller.display_previous_desktop()
),
generate_shortcut(
win32con.MOD_CONTROL | win32con.MOD_WIN,
win32con.VK_RIGHT,
lambda Controller: Controller.move_window_to_next_desktop_and_display()
),
generate_shortcut(
win32con.MOD_CONTROL | win32con.MOD_WIN,
win32con.VK_LEFT,
lambda Controller: Controller.move_window_to_previous_desktop_and_display()
)
)
class Controller(object):
def __init__(self):
self.EventIDs = []
self.DesktopManager = vdesk.DesktopManager()
self.Model = Model()
self.Window = Window(self)
self.TaskBarIcon = TaskBarIcon(self.Window, self)
self.register_hot_keys(self.Model.shortcut_data())
self.MIController = movementindicator.Controller()
def register_hot_keys(self, Hotkeys):
def register_hotkey(Hotkey):
ID = wx.NewId()
self.Window.RegisterHotKey(ID, Hotkey["Modifiers"], Hotkey["Key"])
self.Window.Bind(wx.EVT_HOTKEY, lambda Event: Hotkey["Function"](self),
id=ID)
self.EventIDs.append(ID)
[register_hotkey(Hotkey) for Hotkey in Hotkeys]
def unregister_hotkeys(self):
for ID in self.EventIDs:
self.Window.UnregisterHotKey(ID)
self.EventIDs = []
def display_next_desktop(self):
self.DesktopManager.display_next()
self.MIController.next()
def display_previous_desktop(self):
self.DesktopManager.display_previous()
self.MIController.previous()
def move_window_to_next_desktop_and_display(self):
try:
self.DesktopManager.move_window_to_next_desktop_and_display()
self.MIController.next()
except vdesk.NoForegroundWindow:
pass
def move_window_to_previous_desktop_and_display(self):
try:
self.DesktopManager.move_window_to_previous_desktop_and_display()
self.MIController.previous()
except vdesk.NoForegroundWindow:
pass
def close(self):
self.Window.Hide()
def exit(self):
self.unregister_hotkeys()
self.DesktopManager.show_all_windows()
self.TaskBarIcon.RemoveIcon()
sys.exit(1)
def icon_dbl_click(self):
if self.Window.IsIconized():
self.Window.Iconize(False)
if not self.Window.IsShown():
self.Window.Show(True)
self.Window.Raise()
class TaskBarIcon(wx.TaskBarIcon):
def __init__(self, Parent, Controller):
wx.TaskBarIcon.__init__(self)
def create_child_widgets():
self.Icon = wx.EmptyIcon()
self.ParentFrame = Parent
self.Controller = Controller
self.Menu = wx.Menu()
def configure():
self.EventIDs = {
"NextDesktop": wx.NewId(),
"PreviousDesktop": wx.NewId(),
"Exit": wx.NewId(),
"LaunchPreferences": wx.NewId(),
"LaunchMenu": wx.NewId()
}
def load_bitmap():
image = wx.Image("Resources/icon.bmp", wx.BITMAP_TYPE_ANY)
image.SetMaskColour(255, 0, 255)
return wx.BitmapFromImage(image)
self.Icon.CopyFromBitmap(load_bitmap())
self.SetIcon(self.Icon, "WindowPusher")
self.Menu.Append(self.EventIDs["NextDesktop"], "Display Next Desktop")
self.Menu.Append(self.EventIDs["PreviousDesktop"], "Display Previous Desktop")
self.Menu.AppendSeparator()
self.Menu.Append(self.EventIDs["Exit"], "Exit")
def bind_events():
self.Bind(wx.EVT_TASKBAR_LEFT_DCLICK,
lambda event: self.Controller.icon_dbl_click())
self.Bind(wx.EVT_TASKBAR_RIGHT_UP,
lambda event: self.PopupMenu(self.Menu))
self.Bind(wx.EVT_MENU,
lambda event: self.Controller.exit(),
id=self.EventIDs["Exit"])
self.Bind(wx.EVT_MENU,
                      lambda event: self.Controller.display_next_desktop(),
id=self.EventIDs["NextDesktop"])
self.Bind(wx.EVT_MENU,
                      lambda event: self.Controller.display_previous_desktop(),
id=self.EventIDs["PreviousDesktop"])
create_child_widgets()
configure()
bind_events()
class GeneralTab(wx.Panel):
def __init__(self, parent, controller):
wx.Panel.__init__(self, parent=parent, id=wx.ID_ANY)
self.Controller = controller
def create_child_widgets():
self.RootSizer = wx.BoxSizer(wx.VERTICAL)
self.stNumDesktops = wx.StaticText(self, wx.ID_ANY,
label="Number of Desktops?")
self.scNumDesktops = wx.SpinCtrl(self, wx.ID_ANY, size=(40, -1))
self.sbDesktopIndicator = wx.CheckBox(self, wx.ID_ANY,
"Show Desktop Indicator?")
self.stDesktopIndicatorDelay = wx.StaticText(self, wx.ID_ANY,
label="Desktop Indicator Delay?")
self.slDesktopIndicatorDelay = wx.Slider(self, wx.ID_ANY, 1, 0, 30,
wx.DefaultPosition, (250, -1), wx.SL_HORIZONTAL)
self.sbStartWithWindows = wx.CheckBox(self, wx.ID_ANY,
"Start with Windows?")
self.btnKeyboardShortcuts = wx.Button(self, wx.ID_ANY,
"Keyboard Shortcuts", (50, 130))
def configure():
def horizontal_sizer(Widgets):
sizer = wx.BoxSizer(wx.HORIZONTAL)
for Widget in Widgets:
sizer.Add(Widget, 0, wx.ALL, 5)
return sizer
self.scNumDesktops.SetRange(1, 4)
self.scNumDesktops.SetValue(4)
sizerNumDesktops = horizontal_sizer((self.stNumDesktops, self.scNumDesktops))
sizerDesktopIndicatorDelay = horizontal_sizer(
(self.stDesktopIndicatorDelay,
self.slDesktopIndicatorDelay)
)
self.RootSizer.Add(sizerNumDesktops, 0, wx.LEFT)
self.RootSizer.Add(self.sbStartWithWindows, 0, wx.LEFT)
self.RootSizer.AddSpacer(5)
self.RootSizer.Add(self.sbDesktopIndicator, 0, wx.LEFT)
self.RootSizer.Add(sizerDesktopIndicatorDelay, 0, wx.LEFT)
self.RootSizer.AddSpacer(10)
self.RootSizer.Add(self.btnKeyboardShortcuts, 0, wx.LEFT)
self.SetSizer(self.RootSizer)
def bind_events():
pass
create_child_widgets()
configure()
bind_events()
class PreferencesNotebook(wx.Notebook):
def __init__(self, Parent, Controller):
wx.Notebook.__init__(self, Parent, id=wx.ID_ANY, style=wx.BK_DEFAULT)
self.Controller = Controller
def create_child_widgets():
self.General = GeneralTab(self, Controller)
def configure():
self.AddPage(self.General, "General")
def bind_events():
pass
create_child_widgets()
configure()
bind_events()
class Window(wx.Frame):
def __init__(self, Controller):
wx.Frame.__init__(self, None, -1, "WindowPusher", (100, 100), (300, 300),
style=wx.CLOSE_BOX | wx.SYSTEM_MENU | wx.CAPTION | wx.FRAME_TOOL_WINDOW)
self.Controller = Controller
self.EventIDs = []
def create_child_widgets():
self.Panel = wx.Panel(self, wx.ID_ANY)
self.RootSizer = wx.BoxSizer(wx.VERTICAL)
self.Notebook = PreferencesNotebook(self.Panel, Controller)
self.btnClose = wx.Button(self.Panel, wx.ID_ANY, "Close", (50, 130))
self.btnApply = wx.Button(self.Panel, wx.ID_ANY, "Apply", (50, 130))
def configure():
self.Controller = Controller
self.EventIDs = []
ButtonSizer = wx.BoxSizer(wx.HORIZONTAL)
ButtonSizer.Add(self.btnClose, 0, wx.ALL, 5)
ButtonSizer.Add(self.btnApply, 0, wx.ALL, 5)
self.RootSizer.Add(self.Notebook, 1, wx.EXPAND | wx.ALL, 5)
self.RootSizer.Add(ButtonSizer, 0, wx.ALIGN_BOTTOM | wx.ALIGN_RIGHT, 5)
self.Panel.SetSizer(self.RootSizer)
self.Panel.Layout()
# self.Layout()
def bind_events():
self.Bind(wx.EVT_CLOSE, lambda Event: self.Controller.close())
self.btnClose.Bind(wx.EVT_BUTTON, lambda Event: self.Controller.close())
self.btnApply.Bind(wx.EVT_BUTTON, lambda Event: self.Controller.close())
create_child_widgets()
configure()
bind_events()
|
# -*- coding: utf-8 -*-
import unittest
from equivalence.AstraRastr import RASTR
from equivalence.Load import LoadFile
from equivalence.actions.zeroing import Zeroing
from equivalence.tables.Tables import Node
from equivalence.test.model_test_RUSTab.path_model import absolute_path_to_file
class TestZeroing(unittest.TestCase):
def setUp(self) -> None:
        # load the 9-bus RastrWin3 test case
LoadFile(rastr_win=RASTR) \
.load(
name_shabl_russian='динамика',
path_file=rf'{absolute_path_to_file}\model_test_RUSTab\test9.rst')
self.zeroing_obj = Zeroing(rastr_win=RASTR)
def test_row(self):
self.zeroing_obj.node()
_table = RASTR.Tables(Node.table)
_table.SetSel("sel=1")
count_table_node = _table.Count
self.assertEqual(
first=count_table_node,
second=0
)
|
""" Requires OpenCV2:
https://docs.opencv.org/master/d7/d9f/tutorial_linux_install.html
"""
import pickle
import socket
import struct
import numpy as np
import cv2
from snr.proc_endpoint import ProcEndpoint
from snr.node import Node
from snr.utils import debug
from snr.cv import find_plants
from snr.cv.boxes import apply_boxes
HOST = "localhost"
# Number of frames to skip to calculate the box
FRAME_SKIP_COUNT = 5
# Title of the window
WINDOW_TITLE = 'Video'
TICK_RATE_HZ = 0.0 # never sleep the server
class VideoReceiver(ProcEndpoint):
"""Video stream receiving endpoint.
Shows video received over IP in window.
"""
def __init__(self, parent: Node, name: str,
receiver_port: int):
super().__init__(parent, name,
self.init_receiver, self.monitor_stream,
TICK_RATE_HZ)
self.receiver_port = receiver_port
self.window_name = f"Raspberry Pi Stream: {self.name}"
self.count = 0 # Frame count
self.boxes = [] # Cache of cv boxes
self.start_loop()
def init_receiver(self):
try:
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.dbg("camera_event",
"{}: Socket created on {}",
[self.name, self.receiver_port])
self.s.bind((HOST, self.receiver_port))
self.s.listen(10)
self.dbg("camera_event",
"{}: Socket now listening on {}",
[self.name, self.receiver_port])
self.conn, self.addr = self.s.accept()
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise(e)
else:
self.set_terminate_flag()
return
self.data = b''
self.payload_size = struct.calcsize("=L")
def monitor_stream(self):
try:
# Retrieve message size
while len(self.data) < self.payload_size:
self.data += self.conn.recv(4096)
packed_msg_size = self.data[:self.payload_size]
self.data = self.data[self.payload_size:]
msg_size = struct.unpack("=L", packed_msg_size)[0]
# Retrieve all data based on message size
while len(self.data) < msg_size:
self.data += self.conn.recv(4096)
frame_data = self.data[:msg_size]
self.data = self.data[msg_size:]
# Extract frame
frame = pickle.loads(frame_data)
self.count += 1
# Select frames for processing
if ((self.count % FRAME_SKIP_COUNT) == 0):
self.boxes = find_plants.box_image(frame)
frame = apply_boxes(frame,
self.boxes,
find_plants.color,
find_plants.LINE_THICKNESS)
# Display
cv2.imshow(self.window_name, frame)
cv2.waitKey(15)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise(e)
self.dbg("camera_error",
"receiver monitor error: {}",
[e])
self.set_terminate_flag()
def terminate(self):
cv2.destroyAllWindows()
self.parent.datastore.store(f"{self.name}_recvd_frames", self.count)
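
# Illustrative only -- a minimal sketch (not part of the original module) of the
# sender side this receiver expects: every frame is pickled and prefixed with its
# byte length packed as "=L", matching the payload_size/msg_size parsing done in
# monitor_stream(). The port and camera index below are placeholders.
def _example_frame_sender(port: int, camera_index: int = 0):
    cap = cv2.VideoCapture(camera_index)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect((HOST, port))
    try:
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            payload = pickle.dumps(frame)
            # length prefix first, then the pickled frame bytes
            sock.sendall(struct.pack("=L", len(payload)) + payload)
    finally:
        cap.release()
        sock.close()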
|
#!/usr/bin/python
# Probably belongs here...
from flask import Flask, request, jsonify, send_from_directory
from datetime import datetime
import datahit
from datahit import datahithtml, datahitlog
import json
from elasticsearch import Elasticsearch
import yaml
import sys
rundate = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
# TODO: Make config file a command line option
config = yaml.safe_load(open("config.yaml"))
app = Flask(__name__)
# TODO: Make ES information configurable in config file
eshost = config['eshost']
esport = int(config['esport'])
esindex = config['esindex']
esdoc_type = "datarec"
es = Elasticsearch([{'host': eshost, 'port': esport}])
datahitlog.log("Connected to Elasticsearch")
esMappings = {
"mappings" : {
esdoc_type : {
"properties" : {
"userID" : {
"type" : "string",
"index" : "not_analyzed"
},
"recID" : {
"type" : "string",
"index" : "not_analyzed"
}
}
}
}
}
res = es.indices.create(index=esindex, ignore=400, body=esMappings)
# For development until/when auth is implemented
userID = "869b0838-b103-4b79-bc02-d9ea6a9d6710" # this is a random guid
@app.route("/editor", methods=["POST","GET"])
def datahiteditor():
recID = request.form['recID']
datarec = {}
query = {"query": { "ids" : { "type" : "datarec", "values" : [recID] }}}
r = es.search(index=esindex, body=query)
hitsTotal = int(r['hits']['total'])
if hitsTotal == 0:
with open('static/schemas/default.json') as f:
datarec = json.loads(f.read())
datahitlog.log("Loading default record data.")
elif hitsTotal == 1:
datarec = r['hits']['hits'][0]['_source']['datarec']
datahitlog.log("Loading data record " + recID)
values = {
'date': rundate,
'recID': recID,
'datarec': json.dumps(datarec)
}
return datahithtml.render("templates/editor.html", values, config['cacheJavascript'])
@app.route("/list")
def hitlist():
query = {
"query" : {
"constant_score" : {
"filter" : {
"term" : {
"userID" : userID
}
}
}
}
}
print(json.dumps(query))
r = es.search(index=esindex, doc_type=esdoc_type, body=query)
recTable = datahithtml.createRecTable(r['hits']['hits'])
values = {'recTable': recTable }
return datahithtml.render("templates/list.html", values, config['cacheJavascript'])
@app.route("/saverec", methods=["POST"])
def saveplan():
recDoc = request.get_json()
recDoc['userID'] = userID
res = es.index(index=esindex, doc_type=esdoc_type, id=recDoc['recID'], body=recDoc)
print(json.dumps(res))
return "Record saved!"
@app.route('/static/<path:path>')
def send_static(path):
return send_from_directory('static', path)
if __name__ == '__main__':
app.run(host='0.0.0.0',port=config['port'],threaded=True)
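
# Illustrative only (not part of the original app): saving a record through the
# /saverec endpoint with the `requests` library, assuming the server is running
# locally on the port configured in config.yaml (5000 below is a placeholder):
#
#   import requests
#   requests.post("http://localhost:5000/saverec",
#                 json={"recID": "example-rec-1", "datarec": {"title": "demo"}})
#
# The handler attaches the development userID to the document and indexes it in
# Elasticsearch under the configured index, so the record then shows up in /list.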
|
# Copyright (c) 2008 Mikael Lind
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import numpy, pygame
from pygame.locals import *
from OpenGL.GL import *
class Display(object):
def __init__(self, (width, height), caption):
self.__width = width
self.__height = height
pygame.display.set_mode((width, height),
OPENGL | DOUBLEBUF | SWSURFACE)
pygame.display.set_caption("%s - tics" % caption)
glClearColor(0.0, 0.0, 0.0, 0.0)
glEnable(GL_BLEND)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glPixelStorei(GL_PACK_ALIGNMENT, 1)
glPixelStorei(GL_PACK_SKIP_PIXELS, 0)
glPixelStorei(GL_PACK_SKIP_ROWS, 0)
glPixelStorei(GL_PACK_SKIP_IMAGES, 0)
glPixelStorei(GL_PACK_ROW_LENGTH, self.__width)
glPixelStorei(GL_PACK_IMAGE_HEIGHT, self.__height)
def draw_image(self, image):
glClear(GL_COLOR_BUFFER_BIT)
image.draw()
pygame.display.flip()
def read_pixels(self):
glFinish()
pixels = glReadPixelsub(0, 0, self.__width, self.__height, GL_RGB)
return numpy.array(pixels.flat)
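
# Illustrative only -- a minimal usage sketch (not part of the original module).
# Note that this 2008-era module uses Python 2 style tuple parameters in __init__;
# the call below is written the same way either version would accept. `some_image`
# is a placeholder for any object exposing a draw() method, as draw_image() expects.
#
#   pygame.init()
#   display = Display((640, 480), "demo")
#   display.draw_image(some_image)   # clears the colour buffer, draws, then flips
#   pixels = display.read_pixels()   # flat numpy array of RGB values for one frame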
|
# Import Libraries
import pandas as pd
import numpy as np
import yfinance as yf
import time
# Import Libraries
from scipy import stats
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# Import Libraries
from ta.momentum import RSIIndicator
from ta.trend import SMAIndicator
# import numpy as np
# import pandas as pd
# import matplotlib.pyplot as plt
# import yfinance as yf
import math
class YinsML:
"""
Yin's Machine Learning Package
Copyright © W.Y.N. Associates, LLC, 2009 – Present
"""
# Define function
def LogisticRegression_Classifier(X_train, X_test, y_train, y_test, random_state = 0):
# Import Modules
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
from sklearn.linear_model import LogisticRegression
# Train
LOGIT_Clf = LogisticRegression( random_state=random_state )
LOGIT_Clf = LOGIT_Clf.fit(X_train, y_train)
# Report In-sample Estimators
y_train_hat_ = LOGIT_Clf.predict(X_train)
y_train_hat_score = LOGIT_Clf.predict_proba(X_train)
from sklearn.metrics import confusion_matrix
confusion_train = pd.DataFrame(confusion_matrix(y_train_hat_, y_train))
confusion_train
train_acc = sum(np.diag(confusion_train)) / sum(sum(np.array(confusion_train)))
train_acc
y_test_hat_ = LOGIT_Clf.predict(X_test)
y_test_hat_score = LOGIT_Clf.predict_proba(X_test)
confusion_test = pd.DataFrame(confusion_matrix(y_test_hat_, y_test))
confusion_test
test_acc = sum(np.diag(confusion_test)) / sum(sum(np.array(confusion_test)))
test_acc
# Output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': LOGIT_Clf,
'Train Result': {
'y_train_hat_': y_train_hat_,
'y_train_hat_score': y_train_hat_score,
'confusion_train': confusion_train,
'train_acc': train_acc
},
'Test Result': {
'y_test_hat_': y_test_hat_,
'y_test_hat_score': y_test_hat_score,
'confusion_test': confusion_test,
'test_acc': test_acc
}
}
# End of function
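    # Illustrative usage (not part of the original package); assumes pandas
    # DataFrames/Series for X_train, X_test, y_train and y_test:
    #
    #   result = YinsML.LogisticRegression_Classifier(X_train, X_test, y_train, y_test)
    #   result['Test Result']['test_acc']        # out-of-sample accuracy
    #   result['Test Result']['confusion_test']  # out-of-sample confusion matrix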
# Define function
def KNN_Classifier(X_train, X_test, y_train, y_test, n_neighbors = 3):
# Import Modules
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
from sklearn.neighbors import KNeighborsClassifier
# Train
KNN_Clf = KNeighborsClassifier( n_neighbors=n_neighbors )
KNN_Clf = KNN_Clf.fit(X_train, y_train)
# Report In-sample Estimators
y_train_hat_ = KNN_Clf.predict(X_train)
y_train_hat_score = KNN_Clf.predict_proba(X_train)
from sklearn.metrics import confusion_matrix
confusion_train = pd.DataFrame(confusion_matrix(y_train_hat_, y_train))
confusion_train
train_acc = sum(np.diag(confusion_train)) / sum(sum(np.array(confusion_train)))
train_acc
y_test_hat_ = KNN_Clf.predict(X_test)
y_test_hat_score = KNN_Clf.predict_proba(X_test)
confusion_test = pd.DataFrame(confusion_matrix(y_test_hat_, y_test))
confusion_test
test_acc = sum(np.diag(confusion_test)) / sum(sum(np.array(confusion_test)))
test_acc
# Output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': KNN_Clf,
'Train Result': {
'y_train_hat_': y_train_hat_,
'y_train_hat_score': y_train_hat_score,
'confusion_train': confusion_train,
'train_acc': train_acc
},
'Test Result': {
'y_test_hat_': y_test_hat_,
'y_test_hat_score': y_test_hat_score,
'confusion_test': confusion_test,
'test_acc': test_acc
}
}
# End of function
# Define function
def DecisionTree_Classifier(
X_train, X_test, y_train, y_test, maxdepth = 3,
verbose=True,
figsize=(12,6),
fontsize=12):
# Import Modules
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
from sklearn import tree
# Train
DCT = tree.DecisionTreeClassifier(max_depth=maxdepth)
DCT = DCT.fit(X_train, y_train)
# Plot
if verbose:
plt.figure(figsize=figsize)
tree.plot_tree(DCT, feature_names=X_train.columns, fontsize=fontsize)
# Report In-sample Estimators
y_train_hat_ = DCT.predict(X_train)
y_train_hat_score = DCT.predict_proba(X_train)
from sklearn.metrics import confusion_matrix
confusion_train = pd.DataFrame(confusion_matrix(y_train_hat_, y_train))
confusion_train
train_acc = sum(np.diag(confusion_train)) / sum(sum(np.array(confusion_train)))
train_acc
y_test_hat_ = DCT.predict(X_test)
y_test_hat_score = DCT.predict_proba(X_test)
confusion_test = pd.DataFrame(confusion_matrix(y_test_hat_, y_test))
confusion_test
test_acc = sum(np.diag(confusion_test)) / sum(sum(np.array(confusion_test)))
test_acc
# Output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': DCT,
'Train Result': {
'y_train_hat_': y_train_hat_,
'y_train_hat_score': y_train_hat_score,
'confusion_train': confusion_train,
'train_acc': train_acc
},
'Test Result': {
'y_test_hat_': y_test_hat_,
'y_test_hat_score': y_test_hat_score,
'confusion_test': confusion_test,
'test_acc': test_acc
}
}
# End of function
# define function
def DecisionTree_Regressor(
X_train, X_test, y_train, y_test,
maxdepth=3,
verbose=True,
figsize=(12,6),
fontsize=12):
# Import Modules
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
from sklearn import tree
# Train
DCT = tree.DecisionTreeRegressor(max_depth=maxdepth)
DCT = DCT.fit(X_train, y_train)
# Report In-sample Estimators
y_train_hat_ = DCT.predict(X_train)
RMSE_train = np.sqrt(np.mean((y_train_hat_ - y_train)**2))
# Report Out-of-sample Estimators
y_test_hat_ = DCT.predict(X_test)
RMSE_test = np.sqrt(np.mean((y_test_hat_ - y_test)**2))
# Plot
if verbose:
plt.figure(figsize=figsize)
tree.plot_tree(DCT, feature_names=X_train.columns, fontsize=fontsize)
# Output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': DCT,
'Train Result': {
'y_train_hat_': y_train_hat_,
'RMSE_train': RMSE_train
},
'Test Result': {
'y_test_hat_': y_test_hat_,
'RMSE_test': RMSE_test
}
}
# End of function
# Define function
def RandomForest_Classifier(X_train, X_test, y_train, y_test, maxdepth = 3):
# Import Modules
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
from sklearn import ensemble
# Train
RF_Clf = ensemble.RandomForestClassifier(max_depth=maxdepth)
RF_Clf = RF_Clf.fit(X_train, y_train)
# Report In-sample Estimators
y_train_hat_ = RF_Clf.predict(X_train)
y_train_hat_score = RF_Clf.predict_proba(X_train)
from sklearn.metrics import confusion_matrix
confusion_train = pd.DataFrame(confusion_matrix(y_train_hat_, y_train))
confusion_train
train_acc = sum(np.diag(confusion_train)) / sum(sum(np.array(confusion_train)))
train_acc
y_test_hat_ = RF_Clf.predict(X_test)
y_test_hat_score = RF_Clf.predict_proba(X_test)
confusion_test = pd.DataFrame(confusion_matrix(y_test_hat_, y_test))
confusion_test
test_acc = sum(np.diag(confusion_test)) / sum(sum(np.array(confusion_test)))
test_acc
# Output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': RF_Clf,
'Train Result': {
'y_train_hat_': y_train_hat_,
'y_train_hat_score': y_train_hat_score,
'confusion_train': confusion_train,
'train_acc': train_acc
},
'Test Result': {
'y_test_hat_': y_test_hat_,
'y_test_hat_score': y_test_hat_score,
'confusion_test': confusion_test,
'test_acc': test_acc
}
}
# End of function
# define function
def RandomForest_Regressor(
X_train, X_test,
y_train, y_test,
n_trees=100,
maxdepth=3,
figsize=(4,4),
dpi=800,
font_size=12,
verbose=True):
# Import Modules
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
from sklearn import tree
from sklearn.ensemble import RandomForestRegressor
import time
# Train
RF = RandomForestRegressor(
n_estimators=n_trees,
max_depth=maxdepth)
RF = RF.fit(X_train, y_train)
# Visualization
if verbose:
cn=None
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=figsize, dpi=dpi)
tree.plot_tree(RF.estimators_[0],
feature_names = X_train.columns,
class_names=cn,
filled = True);
fig.savefig('rf_individualtree.png')
# Feature Importance
if verbose:
start_time = time.time()
importances = RF.feature_importances_
std = np.std([tree.feature_importances_ for tree in RF.estimators_], axis=0)
elapsed_time = time.time() - start_time
print(f"Elapsed time to compute the importances: {elapsed_time:.3f} seconds")
forest_importances = pd.Series(importances, index=X_train.columns)
fig, ax = plt.subplots(figsize=figsize)
forest_importances.plot.bar(yerr=std, ax=ax)
plt.rc('font', size=font_size)
ax.set_title("Feature importances using MDI")
ax.set_ylabel("Mean Decrease in Impurity (MDI)")
# fig.tight_layout()
# Report In-sample Estimators
y_train_hat_ = RF.predict(X_train)
RMSE_train = np.sqrt(np.mean((y_train_hat_ - y_train)**2))
# Report Out-of-sample Estimators
y_test_hat_ = RF.predict(X_test)
RMSE_test = np.sqrt(np.mean((y_test_hat_ - y_test)**2))
# Output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': RF,
'Train Result': {
'y_train_hat_': y_train_hat_,
'RMSE_train': RMSE_train
},
'Test Result': {
'y_test_hat_': y_test_hat_,
'RMSE_test': RMSE_test
}
}
# End of function
# Define function
def GradientBoosting_Classifier(X_train, X_test, y_train, y_test,
n_estimators = 100,
learning_rate = 0.2,
maxdepth = 3,
random_state = 0):
# Import Modules
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import random
from sklearn import ensemble
# Train
GB_Clf = ensemble.GradientBoostingClassifier(
n_estimators=n_estimators, learning_rate=learning_rate, max_depth=maxdepth, random_state=random_state )
GB_Clf = GB_Clf.fit(X_train, y_train)
# Report In-sample Estimators
y_train_hat_ = GB_Clf.predict(X_train)
y_train_hat_score = GB_Clf.predict_proba(X_train)
from sklearn.metrics import confusion_matrix
confusion_train = pd.DataFrame(confusion_matrix(y_train_hat_, y_train))
confusion_train
train_acc = sum(np.diag(confusion_train)) / sum(sum(np.array(confusion_train)))
train_acc
y_test_hat_ = GB_Clf.predict(X_test)
y_test_hat_score = GB_Clf.predict_proba(X_test)
confusion_test = pd.DataFrame(confusion_matrix(y_test_hat_, y_test))
confusion_test
test_acc = sum(np.diag(confusion_test)) / sum(sum(np.array(confusion_test)))
test_acc
# Output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': GB_Clf,
'Train Result': {
'y_train_hat_': y_train_hat_,
'y_train_hat_score': y_train_hat_score,
'confusion_train': confusion_train,
'train_acc': train_acc
},
'Test Result': {
'y_test_hat_': y_test_hat_,
'y_test_hat_score': y_test_hat_score,
'confusion_test': confusion_test,
'test_acc': test_acc
}
}
# End of function
# define function
def GradientBoosting_Regressor(
X_train, X_test, y_train, y_test,
n_estimators = 100,
learning_rate = 0.2,
maxdepth = 3,
random_state = 0):
# Import Modules
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import ensemble
import time
# Train
GB_Reg = ensemble.GradientBoostingRegressor(
n_estimators=n_estimators, learning_rate=learning_rate, max_depth=maxdepth, random_state=random_state )
GB_Reg = GB_Reg.fit(X_train, y_train)
# Features
feature_importance = pd.DataFrame([GB_Reg.feature_importances_], columns=X_train.columns)
# Report In-sample Estimators
y_train_hat_ = GB_Reg.predict(X_train)
RMSE_train = np.sqrt(np.mean((y_train_hat_ - y_train)**2))
# Report Out-of-sample Estimators
y_test_hat_ = GB_Reg.predict(X_test)
RMSE_test = np.sqrt(np.mean((y_test_hat_ - y_test)**2))
# Output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': GB_Reg,
'Feature Importance': feature_importance,
'Train Result': {
'y_train_hat_': y_train_hat_,
'RMSE_train': RMSE_train
},
'Test Result': {
'y_test_hat_': y_test_hat_,
'RMSE_test': RMSE_test
}
}
# End of function
# define SVM_Regressor function:
def SVM_Regressor(
X_train=None,
y_train=None,
X_valid=None,
y_valid=None,
X_test=None,
y_test=None,
useStandardScaler=True,
kernel='rbf', gamma='auto',
C=1.0, epsilon=0.2,
axis_font_size=20,
verbose=True
):
# library
import pandas as pd
import time
# checkpoint
start = time.time()
# build model
from sklearn.svm import SVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import numpy as np
# source: https://scikit-learn.org/stable/modules/generated/sklearn.svm.SVR.html
if useStandardScaler:
regr = make_pipeline(StandardScaler(), SVR(kernel=kernel, gamma=gamma, C=C, epsilon=epsilon, verbose=verbose))
else:
# kernel='rbf', gamma='auto', C=1.0, epsilon=0.2
regr = SVR(kernel=kernel, gamma=gamma, C=C, epsilon=epsilon, verbose=verbose)
# fit model
regr.fit(X_train, y_train)
# checkpoint
end = time.time()
if verbose:
print('Training time consumption ' + str(end-start) + ' seconds.')
# prediction on train set
y_train_hat_ = regr.predict(X_train)
# prediction on test set
y_test_hat_ = regr.predict(X_test)
# library
import numpy as np
# mean square error on train set
y_train_hat_ = y_train_hat_.reshape(-1)
RMSE_train = (np.sum((y_train_hat_ - y_train) ** 2) / len(y_train)) ** 0.5
# mean square error on test set
y_test_hat_ = y_test_hat_.reshape(-1)
RMSE_test = (np.sum((y_test_hat_ - y_test) ** 2) / len(y_test)) ** 0.5
# visualize
if verbose:
import seaborn as sns
residuals = y_test - y_test_hat_
            residuals = pd.Series(residuals, name='Residuals')
fitted = pd.Series(y_test_hat_, name='Fitted Value')
ax = sns.regplot(x=residuals, y=fitted, color='g').set(title='Residuals vs. Fitted Values (Test)')
print("Reminder: A good fit leads to Gaussian-like residuals.")
# Output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': regr,
'Train Result': {
'y_train_hat_': y_train_hat_,
'RMSE_train': RMSE_train
},
'Test Result': {
'y_test_hat_': y_test_hat_,
'RMSE_test': RMSE_test
}
}
# End of function
# Define function
def Adam_Regressor(Xadam, y, batch_size = 10, lr = 0.01, epochs = 200, period = 20, verbose=True):
# Library
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Adam
def adam(params, vs, sqrs, lr, batch_size, t):
            beta1 = 0.1    # note: these differ from the conventional Adam defaults
            beta2 = 0.111  # (beta1 = 0.9, beta2 = 0.999)
eps_stable = 1e-9
for param, v, sqr in zip(params, vs, sqrs):
g = param.grad / batch_size
v[:] = beta1 * v + (1. - beta1) * g
sqr[:] = beta2 * sqr + (1. - beta2) * nd.square(g)
v_bias_corr = v / (1. - beta1 ** t)
sqr_bias_corr = sqr / (1. - beta2 ** t)
div = lr * v_bias_corr / (nd.sqrt(sqr_bias_corr) + eps_stable)
param[:] = param - div
# Library
import mxnet as mx
from mxnet import autograd
from mxnet import ndarray as nd
from mxnet import gluon
import random
mx.random.seed(1)
random.seed(1)
# Generate data.
# Xadam = covid19_confirmed_china_rolling_data.iloc[:, [1,2,3,5]] <=== this is input
num_inputs = pd.DataFrame(Xadam).shape[1]
num_examples = pd.DataFrame(Xadam).shape[0]
X = nd.array(Xadam)
# y = nd.array(covid19_confirmed_china_rolling_data['Y']) <=== this is input
dataset = gluon.data.ArrayDataset(X, y)
# Construct data iterator.
def data_iter(batch_size):
idx = list(range(num_examples))
random.shuffle(idx)
for batch_i, i in enumerate(range(0, num_examples, batch_size)):
j = nd.array(idx[i: min(i + batch_size, num_examples)])
yield batch_i, X.take(j), y.take(j)
# Initialize model parameters.
def init_params():
w = nd.random_normal(scale=1, shape=(num_inputs, 1))
b = nd.zeros(shape=(1,))
params = [w, b]
vs = []
sqrs = []
for param in params:
param.attach_grad()
vs.append(param.zeros_like())
sqrs.append(param.zeros_like())
return params, vs, sqrs
# Linear regression.
def net(X, w, b):
return nd.dot(X, w) + b
# Loss function.
def square_loss(yhat, y):
return (yhat - y.reshape(yhat.shape)) ** 2 / 2
# %matplotlib inline
import matplotlib as mpl
mpl.rcParams['figure.dpi']= 120
import matplotlib.pyplot as plt
import numpy as np
def train(batch_size, lr, epochs, period):
assert period >= batch_size and period % batch_size == 0
[w, b], vs, sqrs = init_params()
total_loss = [np.mean(square_loss(net(X, w, b), y).asnumpy())]
t = 0
# Epoch starts from 1.
for epoch in range(1, epochs + 1):
for batch_i, data, label in data_iter(batch_size):
with autograd.record():
output = net(data, w, b)
loss = square_loss(output, label)
loss.backward()
# Increment t before invoking adam.
t += 1
adam([w, b], vs, sqrs, lr, batch_size, t)
if batch_i * batch_size % period == 0:
total_loss.append(np.mean(square_loss(net(X, w, b), y).asnumpy()))
print("Batch size %d, Learning rate %f, Epoch %d =========================> loss %.4e" %
(batch_size, lr, epoch, total_loss[-1]))
print('w:', np.reshape(w.asnumpy(), (1, -1)),
'b:', b.asnumpy()[0], '\n')
x_axis = np.linspace(0, epochs, len(total_loss), endpoint=True)
plt.semilogy(x_axis, total_loss)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
return w, b
w, b = train(batch_size = batch_size, lr = lr, epochs = epochs, period = period)
w_adam = []
for w_i in range(len(list(w.asnumpy()))):
w_adam.append(list(w.asnumpy())[w_i][0])
if verbose:
print('Weight:', w_adam)
b_adam = list(b.asnumpy())[0]
if verbose:
print('Bias:', b_adam)
y_hat_adam = np.dot(Xadam, w_adam) + b_adam
return {
'parameters': {'w': w, 'b': b},
'y_estimate': y_hat_adam
}
# End of function
# Define function
def ResultAUCROC(y_test, y_test_hat):
from sklearn.metrics import roc_curve, auc, roc_auc_score
fpr, tpr, thresholds = roc_curve(y_test, y_test_hat)
areaUnderROC = auc(fpr, tpr)
resultsROC = {
'false positive rate': fpr,
'true positive rate': tpr,
'thresholds': thresholds,
'auc': round(areaUnderROC, 3) }
import matplotlib.pyplot as plt
plt.figure()
plt.plot(fpr, tpr, color='r', lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='k', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: \
Area under the curve = {0:0.3f}'.format(areaUnderROC))
plt.legend(loc="lower right")
plt.show()
def AutoMachineLearningClassifier(
X = None,
y = None,
cutoff = 0.1,
random_state = 123,
selected_algorithm = ['AdaBoostClassifier', 'BaggingClassifier', 'BernoulliNB', 'CalibratedClassifierCV', 'DecisionTreeClassifier', 'DummyClassifier', 'ExtraTreeClassifier', 'ExtraTreesClassifier', 'GaussianNB', 'KNeighborsClassifier', 'LabelPropagation', 'LabelSpreading', 'LinearDiscriminantAnalysis', 'LinearSVC', 'LogisticRegression', 'NearestCentroid', 'NuSVC', 'PassiveAggressiveClassifier', 'Perceptron', 'QuadraticDiscriminantAnalysis', 'RandomForestClassifier', 'RidgeClassifier', 'RidgeClassifierCV', 'SGDClassifier', 'SVC', 'XGBClassifier', 'LGBMClassifier']
):
# library
import lazypredict
from lazypredict.Supervised import LazyClassifier
# split train test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=cutoff, random_state=random_state)
# fit
clf = LazyClassifier(verbose=0, ignore_warnings=True, custom_metric=None)
results, predictions = clf.fit(X_train, X_test, y_train, y_test)
models_ = clf.provide_models(X_train, X_test, y_train, y_test)
# prediction
y_train_hat_mat_ = []
y_test_hat_mat_ = []
for some_algo in selected_algorithm:
y_train_hat_mat_.append(models_[some_algo].predict(X_train))
y_test_hat_mat_.append(models_[some_algo].predict(X_test))
# convert
y_train_hat_mat_ = pd.DataFrame(np.asarray(y_train_hat_mat_)).transpose().values
y_test_hat_mat_ = pd.DataFrame(np.asarray(y_test_hat_mat_)).transpose().values
# output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': models_,
'List of Algorithms': models_.keys(),
'Results': results,
'Predictions': {
'y_train_hat_mat_': y_train_hat_mat_,
'y_test_hat_mat_': y_test_hat_mat_
}
}
def AutoMachineLearningRegressor(
X = None,
y = None,
cutoff = 0.1,
random_state = 123,
selected_algorithm = ['AdaBoostRegressor', 'BaggingRegressor', 'BayesianRidge', 'DecisionTreeRegressor', 'DummyRegressor', 'ElasticNet', 'ElasticNetCV', 'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GammaRegressor', 'GaussianProcessRegressor', 'GeneralizedLinearRegressor', 'GradientBoostingRegressor', 'HistGradientBoostingRegressor', 'HuberRegressor', 'KNeighborsRegressor', 'KernelRidge', 'Lars', 'LarsCV', 'Lasso', 'LassoCV', 'LassoLars', 'LassoLarsCV', 'LassoLarsIC', 'LinearRegression', 'LinearSVR', 'MLPRegressor', 'NuSVR', 'OrthogonalMatchingPursuit', 'OrthogonalMatchingPursuitCV', 'PassiveAggressiveRegressor', 'PoissonRegressor', 'RANSACRegressor', 'RandomForestRegressor', 'Ridge', 'RidgeCV', 'SGDRegressor', 'SVR', 'TransformedTargetRegressor', 'TweedieRegressor', 'XGBRegressor', 'LGBMRegressor']
):
# library
import lazypredict
from lazypredict.Supervised import LazyRegressor
# split train test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=cutoff, random_state=random_state)
# fit
reg = LazyRegressor(verbose=0, ignore_warnings=True, custom_metric=None)
results, predictions = reg.fit(X_train, X_test, y_train, y_test)
models_ = reg.provide_models(X_train, X_test, y_train, y_test)
# prediction
y_train_hat_mat_ = []
y_test_hat_mat_ = []
for some_algo in selected_algorithm:
y_train_hat_mat_.append(models_[some_algo].predict(X_train))
y_test_hat_mat_.append(models_[some_algo].predict(X_test))
# convert
y_train_hat_mat_ = pd.DataFrame(np.asarray(y_train_hat_mat_)).transpose().values
y_test_hat_mat_ = pd.DataFrame(np.asarray(y_test_hat_mat_)).transpose().values
# output
return {
'Data': {
'X_train': X_train,
'y_train': y_train,
'X_test': X_test,
'y_test': y_test
},
'Model': models_,
'List of Algorithms': models_.keys(),
'Results': results,
'Predictions': {
'y_train_hat_mat_': y_train_hat_mat_,
'y_test_hat_mat_': y_test_hat_mat_
}
}
|
import zipfile
import tempfile
from typing import Union
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow import keras
from PIL import Image
from keras.preprocessing import image
import numpy as np
import datetime
from matplotlib import pyplot as plt
from tensorflow.keras.layers import *
import os
from tqdm import trange
import keras.models
from tabulate import tabulate
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.applications import ResNet50V2
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
import cv2
from tensorflow.keras.optimizers import Adam
import traceback
class KerasTrain(object):
def __init__(self, model=None, batch_size=32, epochs=25, workers=1, use_multiprocessing=False) -> None:
super().__init__()
self.model = model
self.batch_size = batch_size
self.epochs = epochs
self.workers = workers
self.use_multiprocessing = use_multiprocessing
self.model_classes_ = {}
self.size = (150, 150)
self.lr = 1e-5
self.log_dir = "logs/fit/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
prototxtPath = "deploy.prototxt"
weightsPath = "res10_300x300_ssd_iter_140000.caffemodel"
print("[INFO] Loading face detector model...")
self.net = cv2.dnn.readNet(prototxtPath, weightsPath)
self.tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=self.log_dir, histogram_freq=1)
for i, _dir in enumerate(os.listdir("dataset"), 0):
self.model_classes_[_dir] = i
def loadTensorImg(self, img_path):
img = image.load_img(img_path, target_size=self.size)
imgTensor = image.img_to_array(img)
# (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)
imgTensor = np.expand_dims(imgTensor, axis=0)
# imshow expects values in the range [0, 1]
imgTensor /= 255.
return imgTensor
def predict(self, img: Union[str, np.ndarray], mode="category", modelPath=None, **kwargs):
# model = keras.models.load_model("model.h5")
if modelPath is not None:
self.model = self.loadModel(modelPath)
if isinstance(img, str) and not img.endswith(".png"):
imgPil = Image.open(img)
imgPil = imgPil.convert("RGB")
imgPil.save(img.split(".")[0] + ".png")
# imageToPredict = self.loadTensorImg(imgPath)
prediction = self.model.predict(img, callbacks=[
self.tensorboard_callback], workers=self.workers, use_multiprocessing=self.use_multiprocessing, **kwargs)
return prediction
def train(self, imageDataGeneratorArgs: dict = {}, modelPath="model.h5", **kwargs):
aug = ImageDataGenerator(**imageDataGeneratorArgs)
files = []
dirlist = ["dataset/"]
while len(dirlist) > 0:
for (dirpath, dirnames, filenames) in os.walk(dirlist.pop()):
dirlist.extend(dirnames)
files.extend(map(lambda n: os.path.join(
*n), zip([dirpath] * len(filenames), filenames)))
data = []
labels = []
# loop over the image paths
for imagePath in files:
# extract the class label from the filename
label = imagePath.split(os.path.sep)[-2]
image = load_img(imagePath, target_size=self.size)
image = img_to_array(image)
image = preprocess_input(image)
# update the data and labels lists, respectively
data.append(image)
labels.append(label)
# convert the data and labels to NumPy arrays
data = np.array(data, dtype="float32")
labels = np.array(labels)
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
labels = to_categorical(labels)
(x_train, x_test, y_train, y_test) = train_test_split(data, labels,
test_size=0.20, stratify=labels, random_state=42)
baseModel = ResNet50V2(weights="imagenet", include_top=False,
input_tensor=Input(shape=(150, 150, 3)))
headModel = baseModel.output
headModel = Conv2D(32, kernel_size=(
3, 3), activation='relu', input_shape=(150, 150, 3))(headModel)
headModel = Conv2D(64, (3, 3), activation='relu')(headModel)
headModel = MaxPooling2D(pool_size=(1, 1))(headModel)
headModel = Dropout(0.25)(headModel)
headModel = Flatten()(headModel)
headModel = Dense(128, activation='relu')(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(len(self.model_classes_),
activation="softmax", name="output")(headModel)
# place the head FC model on top of the base model (this will become
# the actual model we will train)
self.model = Model(inputs=baseModel.input, outputs=headModel)
# loop over all layers in the base model and freeze them so they will
# *not* be updated during the first training process
for layer in baseModel.layers:
layer.trainable = False
opt = Adam(learning_rate=self.lr, decay=self.lr / self.epochs)
self.model.compile(loss="binary_crossentropy",
optimizer=opt,
metrics=["accuracy"])
history = self.model.fit(
aug.flow(x_train, y_train, batch_size=self.batch_size),
validation_data=(x_test, y_test),
validation_steps=len(x_test) // self.batch_size,
epochs=self.epochs,
steps_per_epoch=len(x_train) // self.batch_size,
workers=self.workers,
use_multiprocessing=self.use_multiprocessing,
callbacks=[self.tensorboard_callback]
)
self.model.save(modelPath)
plt.style.use("ggplot")
plt.figure()
plt.plot(history.history['accuracy'], label="train_acc")
plt.plot(history.history['val_accuracy'], label="test_acc")
plt.plot(history.history['loss'], label="train_loss")
plt.plot(history.history['val_loss'], label="test_loss")
plt.xlabel("Epochs #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig(f"loss-accuracy-{self.epochs}-{self.batch_size}.png")
plt.clf()
return history, self.model
@staticmethod
def loadModel(path="model.h5"):
model = keras.models.load_model(path)
return KerasTrain(
model=model
)
def displayPredictions(self, predictions: np.ndarray, imgPath: str, coords_msg: str):
print(f"Prediction for :{imgPath}")
dic = {}
for i in range(len(self.model_classes_)):
dic[list(self.model_classes_.keys())[i]] = predictions[0][i]*100
print(tabulate({k: f"{v:.2f}%" for k, v in dic.items()
}.items(), headers=["Class", "Confidence"]))
idx = np.argmax(predictions, axis=1)[0]
print(
f">>> Prediction final \"{list(self.model_classes_.keys())[idx]}\" with {predictions[0][idx]*100:.2f}% confidence {coords_msg}\n")
def predictDirectory(self, dirPath: str = "dataset"):
if os.path.isdir(dirPath):
files = []
dirlist = [dirPath]
while len(dirlist) > 0:
for (dirpath, dirnames, filenames) in os.walk(dirlist.pop()):
dirlist.extend(dirnames)
files.extend(map(lambda n: os.path.join(
*n), zip([dirpath] * len(filenames), filenames)))
else:
files = [dirPath]
for i, f in enumerate(files):
if "." not in f:
continue
split_f = f.split("/")[-1].split(".")
output_dir = f"output"
if not os.path.exists(output_dir):
os.makedirs(output_dir)
f_output = f"{output_dir}/{os.path.basename(split_f[0])}-{i}.{split_f[1]}"
if f_output[-4:] == ".png" or f_output[-4:] == ".jpg":
self.detectFaceAndPredict(f, f_output)
print(f"{f_output} processed")
def detectFaceAndPredict(self, img_path: str, output_path: str):
try:
baseImage = cv2.imread(img_path)
bW, bH, bC = baseImage.shape
image = cv2.resize(baseImage, (600, 600))
resizedW, resizedH, resizedC = image.shape
(h, w) = image.shape[:2]
blob = cv2.dnn.blobFromImage(image, 1.0, (350, 350),
(104.0, 177.0, 123.0))
print("[INFO] computing face detections...")
self.net.setInput(blob)
detections = self.net.forward()
for i in range(0, detections.shape[2]):
confidence = detections[0, 0, i, 2]
if confidence > 0.4:
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
face = image[startY:endY, startX:endX]
is_single_img = False
try:
if not len(face):
face = cv2.cvtColor(baseImage, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, self.size)
face = img_to_array(face)
face = preprocess_input(face)
face = np.expand_dims(face, axis=0)
prediction = self.predict(face)
(startX, startY, endX, endY) = (0, w - 5, h - 5, 0)
is_single_img = True
else:
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, self.size)
face = img_to_array(face)
face = preprocess_input(face)
face = np.expand_dims(face, axis=0)
prediction = self.model.predict(face)
except Exception:
traceback.print_exc()
continue
(mask, withoutMask) = prediction[0]
label = "Mask" if mask > withoutMask else "No Mask"
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
coords_msg = f"on face coords -> ({startX}, {startY}) ({endX}, {endY})"
self.displayPredictions(prediction, img_path, coords_msg)
label = "{}: {:.2f}%".format(
label, max(mask, withoutMask) * 100)
startX = round((startX / resizedH) * bH)
startY = round((startY / resizedW) * bW)
endX = round((endX / resizedH) * bH)
endY = round((endY / resizedW) * bW)
cv2.putText(baseImage, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.35, color, 1)
cv2.rectangle(baseImage, (startX, startY),
(endX, endY), color, 2)
if is_single_img:
break
cv2.imwrite(output_path, baseImage)
except Exception:
print(f"Cannot detect faces : {traceback.format_exc()}")
def testBatchSize(self):
batches = []
testBatches = []
losses = []
lossesTests = []
nbBatches = list(range(1, 257, 32))
for batch in trange(1, 257, 32):
trainer = KerasTrain(batch_size=batch, epochs=25, workers=self.workers,
use_multiprocessing=self.use_multiprocessing)
history, model = trainer.train(
dict(
rotation_range=20,
zoom_range=0.15,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.15,
horizontal_flip=True,
fill_mode="nearest"
),
modelPath=f"model-{batch}.h5"
)
batches.append(max(history.history["accuracy"]) * 100)
testBatches.append(max(history.history["val_accuracy"]) * 100)
losses.append(max(history.history['loss']))
lossesTests.append(max(history.history['val_loss']))
plt.style.use("ggplot")
plt.plot(nbBatches, batches, label="train_acc")
        plt.plot(nbBatches, testBatches, label="test_acc")
plt.plot(nbBatches, losses, label="train_loss")
plt.plot(nbBatches, lossesTests, label="test_loss")
plt.title(f'Model loss/accuracy batch size variation')
plt.ylabel('Loss/Accuracy')
plt.xlabel('Batch size #')
plt.legend(loc='lower left')
plt.savefig(f"loss_accuracy-batch_size.png")
plt.clf()
if __name__ == "__main__":
trainer = KerasTrain()
trainer.testBatchSize()
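
# Illustrative only (not part of the original script): running detection on a
# directory of images with a previously trained model. This assumes model.h5,
# the face-detector files loaded in __init__ and the dataset/ directory are all
# present in the working directory:
#
#   trainer = KerasTrain.loadModel("model.h5")
#   trainer.predictDirectory("dataset")   # annotated copies are written to ./output/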
|
import torch
import cv2
import numpy as np
from argparse import ArgumentParser
from torch.utils.data.sampler import SubsetRandomSampler
from models.model import Model
def argument_setting(inhert=False):
class Range(object):
def __init__(self, start, end):
self.start = start
self.end = end
def __eq__(self, other):
return self.start <= other <= self.end
parser = ArgumentParser()
parser.add_argument('--cuda', type=int, default=0,
help='set the model to run on which gpu (default: 0)')
# dataset argument
parser.add_argument('--holdout-p', type=float, default=0.8,
help='set hold out CV probability (default: 0.8)')
parser.add_argument('--num-workers', type=int, default=8,
help='set the number of processes to run (default: 8)')
# training argument
parser.add_argument('--batch-size', type=int, default=1,
help='set the batch size (default: 1)')
parser.add_argument('--epochs', type=int, default=1,
help='set the epochs (default: 1)')
parser.add_argument(
'--model', type=str,
choices=Model().get_model_list(),
metavar='MODEL_NAME',
default='VGG19',
help=f'set model name.\nThe acceptable models are {Model().get_model_list()} (default: "VGG19")'
)
parser.add_argument('--iteration', action="store_true", default=False,
help='set to decrease learning rate each iteration (default: False)')
parser.add_argument('--train-all', action="store_true", default=False,
help='set to update all parameters of model (default: False)')
# optimizer argument
parser.add_argument('--optim', type=str, default='SGD',
help='set optimizer (default: SGD)')
parser.add_argument('--lr', type=float, default=1e-5,
help='set the learning rate (default: 1e-5)')
parser.add_argument('--momentum', type=float, default=0.9,
help='set momentum of SGD (default: 0.9)')
# scheduler argument
parser.add_argument('--scheduler', action="store_true", default=False,
help='training with step or multi step scheduler (default: False)')
parser.add_argument('--gamma', type=float, default=0.99985,
help='set decreate factor (default: 0.99985)')
# post-processing argument
parser.add_argument(
'--threshold',
type=float,
choices=[Range(0.0, 1.0)],
default=0.99,
metavar='THRESHOLD',
help='the number thresholds the output answer (Float number >= 0 and <=1) (default: 0.99)'
)
parser.add_argument('--output-path', type=str, default='./output/',
help='output file (csv, txt, pth) path (default: ./output)')
parser.add_argument('--train-path', type=str, default='./data/train/',
help='training dataset path (default: ./data/train/)')
parser.add_argument('--test-path', type=str, default='./data/test1/',
help='evaluating dataset path (default: ./data/test1/)')
parser.add_argument('--submit-csv', type=str, default='./data/sample_submission.csv',
help='submission CSV file (default: ./data/sample_submission.csv)')
    # for compatibility: optionally return the parser itself for reuse/inheritance
if inhert is True:
return parser
return parser.parse_args()
def cross_validation(full_set, p=0.8):
"""
hold out cross validation
"""
train_len = len(full_set)
# get shuffled indices
indices = np.random.permutation(range(train_len))
split_idx = int(train_len * p)
train_idx, valid_idx = indices[:split_idx], indices[split_idx:]
full_set = np.array(full_set)
train_set = full_set[list(SubsetRandomSampler(train_idx))]
valid_set = full_set[list(SubsetRandomSampler(valid_idx))]
train_set = torch.from_numpy(train_set)
valid_set = torch.from_numpy(valid_set)
return train_set, valid_set
def threshold_function(data, threshold, device='cpu'):
data = torch.where(data >= threshold, torch.tensor(1.0, dtype=data.dtype).to(device), data)
data = torch.where(data < (1-threshold), torch.tensor(0.0, dtype=data.dtype).to(device), data)
return data
def adaptive_threshold(img):
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
bilateral = cv2.bilateralFilter(gray_img, 11, 75, 75)
blur = cv2.GaussianBlur(bilateral, (5, 5), 1)
adaptive_threshold = cv2.adaptiveThreshold(
blur, 255,
cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY_INV,
11, 2
)
return adaptive_threshold
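
# Illustrative only -- a small demo of the helpers above (not part of the original
# module); 'example.jpg' is a placeholder path.
if __name__ == '__main__':
    # cross_validation: an 80/20 shuffled hold-out split of a toy dataset
    train_set, valid_set = cross_validation(list(range(10)), p=0.8)
    print(len(train_set), len(valid_set))               # 8 2
    # threshold_function: values >= threshold snap to 1, values < 1 - threshold
    # snap to 0, everything in between is left untouched
    sample = torch.tensor([0.05, 0.50, 0.95])
    print(threshold_function(sample, threshold=0.9))    # tensor([0.0000, 0.5000, 1.0000])
    # adaptive_threshold: grayscale -> bilateral filter -> Gaussian blur -> inverse
    # adaptive threshold, producing a binary image
    img = cv2.imread('example.jpg')
    if img is not None:
        cv2.imwrite('example_binary.jpg', adaptive_threshold(img))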
|
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tempest_lib.exceptions import NotFound
from functionaltests.api.v2.models.recordset_model import RecordsetModel
from functionaltests.api.v2.models.recordset_model import RecordsetListModel
from functionaltests.common.client import ClientMixin
from functionaltests.common import utils
class RecordsetClient(ClientMixin):
@classmethod
def recordsets_uri(cls, zone_id):
return "/v2/zones/{0}/recordsets".format(zone_id)
@classmethod
def recordset_uri(cls, zone_id, recordset_id):
return "{0}/{1}".format(cls.recordsets_uri(zone_id), recordset_id)
def list_recordsets(self, zone_id, **kwargs):
resp, body = self.client.get(self.recordsets_uri(zone_id), **kwargs)
return self.deserialize(resp, body, RecordsetListModel)
def get_recordset(self, zone_id, recordset_id, **kwargs):
resp, body = self.client.get(self.recordset_uri(zone_id, recordset_id),
**kwargs)
return self.deserialize(resp, body, RecordsetModel)
def post_recordset(self, zone_id, recordset_model, **kwargs):
resp, body = self.client.post(self.recordsets_uri(zone_id),
body=recordset_model.to_json(), **kwargs)
return self.deserialize(resp, body, RecordsetModel)
def put_recordset(self, zone_id, recordset_id, recordset_model, **kwargs):
resp, body = self.client.put(self.recordset_uri(zone_id, recordset_id),
body=recordset_model.to_json(), **kwargs)
return self.deserialize(resp, body, RecordsetModel)
def delete_recordset(self, zone_id, recordset_id, **kwargs):
resp, body = self.client.delete(
self.recordset_uri(zone_id, recordset_id), **kwargs)
return self.deserialize(resp, body, RecordsetModel)
def wait_for_recordset(self, zone_id, recordset_id):
utils.wait_for_condition(
lambda: self.is_recordset_active(zone_id, recordset_id))
def wait_for_404(self, zone_id, recordset_id):
utils.wait_for_condition(
lambda: self.is_recordset_404(zone_id, recordset_id))
def is_recordset_active(self, zone_id, recordset_id):
resp, model = self.get_recordset(
zone_id, recordset_id)
assert resp.status == 200
if model.status == 'ACTIVE':
return True
elif model.status == 'ERROR':
raise Exception("Saw ERROR status")
return False
def is_recordset_404(self, zone_id, recordset_id):
try:
self.get_recordset(zone_id, recordset_id)
except NotFound:
return True
return False
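
# Illustrative only -- a hypothetical usage sketch (not part of the original module),
# assuming the ClientMixin wiring is set up elsewhere and that the deserialized
# RecordsetModel exposes an `id` attribute:
#
#   client = RecordsetClient(...)
#   resp, recordset = client.post_recordset(zone_id, recordset_model)
#   client.wait_for_recordset(zone_id, recordset.id)   # poll until status is ACTIVE
#   client.delete_recordset(zone_id, recordset.id)
#   client.wait_for_404(zone_id, recordset.id)         # poll until NotFound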
|
import unittest
from max_sub_array_2 import Solution
from ddt import ddt, data, unpack
@ddt
class Tester(unittest.TestCase):
def setUp(self):
self.s = Solution()
@data(
[[1], 1],
[[-2,1,-3,4,-1,2,1,-5,4], 6],
[[-2,-1],-1],
[[0,-1],0]
)
@unpack
def test(self, nums, expected):
ret = self.s.maxSubArray(nums)
self.assertEqual(ret, expected)
if __name__ == '__main__':
unittest.main()
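
# Illustrative only: the Solution class under test lives in max_sub_array_2 and is
# not shown here. A standard Kadane's-algorithm implementation consistent with the
# test cases above would look like:
#
#   class Solution:
#       def maxSubArray(self, nums):
#           best = current = nums[0]
#           for x in nums[1:]:
#               current = max(x, current + x)
#               best = max(best, current)
#           return best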
|
import numpy as np
import matplotlib.pyplot as plt
import h5py
import scipy.io
import math
import sklearn
import sklearn.datasets
def init_mome(parameters):
L = len(parameters) // 2 # number of layers
v = {}
# Initialize velocity
for l in range(L):
v["dW" + str(l+1)] = np.zeros((parameters["W" + str(l+1)].shape[0], parameters["W" + str(l+1)].shape[1]))
v["db" + str(l+1)] = np.zeros((parameters["b" + str(l+1)].shape[0], parameters["b" + str(l+1)].shape[1]))
return v
def update_params_mome(parameters, grads, v, beta, learning_rate):
L = len(parameters) // 2 # number of layers in the neural networks
# Momentum update
for l in range(L):
# velocities
v["dW" + str(l+1)] = beta*v["dW" + str(l+1)] + (1 - beta)*grads['dW' + str(l+1)]
v["db" + str(l+1)] = beta*v["db" + str(l+1)] + (1 - beta)*grads['db' + str(l+1)]
# update params
parameters["W" + str(l+1)] = parameters["W" + str(l+1)] - learning_rate*v["dW" + str(l+1)]
parameters["b" + str(l+1)] = parameters["b" + str(l+1)] - learning_rate*v["db" + str(l+1)]
return parameters, v
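
# Illustrative only -- a tiny end-to-end check of the two helpers above (not part of
# the original module), using a single-layer parameter set and dummy gradients.
if __name__ == '__main__':
    parameters = {"W1": np.ones((2, 3)), "b1": np.zeros((2, 1))}
    grads = {"dW1": 0.1 * np.ones((2, 3)), "db1": 0.2 * np.ones((2, 1))}
    v = init_mome(parameters)   # zero-initialized velocities, one pair per layer
    parameters, v = update_params_mome(parameters, grads, v, beta=0.9, learning_rate=0.01)
    # with zero initial velocity, v = (1 - beta) * grad, so every W1 entry moves by
    # learning_rate * (1 - beta) * 0.1 = 1e-4
    print(parameters["W1"][0, 0])   # 0.9999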
|
import warnings
from statsmodels.stats.diagnostic import (
CompareCox, CompareJ, HetGoldfeldQuandt, OLS, ResultsStore, acorr_breusch_godfrey,
acorr_ljungbox, acorr_lm, breaks_cusumolsresid, breaks_hansen, compare_cox, compare_j,
het_arch, het_breuschpagan, het_goldfeldquandt, het_white, linear_harvey_collier, linear_lm,
linear_rainbow, recursive_olsresiduals, spec_white
)
from statsmodels.tsa.stattools import adfuller
__all__ = ["CompareCox", "CompareJ", "HetGoldfeldQuandt", "OLS",
"ResultsStore", "acorr_breusch_godfrey", "acorr_ljungbox",
"acorr_lm", "adfuller", "breaks_cusumolsresid", "breaks_hansen",
"compare_cox", "compare_j", "het_arch", "het_breuschpagan",
"het_goldfeldquandt", "het_white", "linear_harvey_collier",
"linear_lm", "linear_rainbow", "recursive_olsresiduals",
"spec_white"]
warnings.warn("The statsmodels.sandbox.stats.diagnostic module is deprecated. "
"Use statsmodels.stats.diagnostic.", DeprecationWarning,
stacklevel=2)
|
import hashlib
import datetime
# import pyecharts
from django.http import HttpResponseNotFound
from django.shortcuts import render, redirect, render_to_response
from django.template import RequestContext
from django.core.paginator import Paginator
from pyecharts import Line
from user.models import *
from device.models import *
from data.models import *
# pyecharts.configure(
# global_theme='roma'
# )
def sha256(password):
return hashlib.sha256(password.encode()).hexdigest()
def page_not_found(request, exception, template_name="error-404.html"):
# response = render_to_response('error-404.html')
# response.status_code = 404
# return response
# return render(request, 'error-404.html')
return render(request, template_name, status=404)
def page_error(request):
return render(request, 'error-500.html', status=500)
def logout(request):
log = UserLogModel()
log.data_time = datetime.datetime.now()
log.phone_number = request.session.get('phone_number', '')
log.action = '0'
if request.META.get('HTTP_X_FORWARDED_FOR', ''):
ip = request.META['HTTP_X_FORWARDED_FOR']
else:
ip = request.META['REMOTE_ADDR']
log.ip = ip
log.save()
if request.session.get('phone_number', ''):
request.session.flush()
return redirect('/login/')
def login(request):
context = {
'title': "欢迎来到实验室用电信息统计平台"
}
if request.method == "POST":
# print(request.POST)
phone_number = request.POST.get('phone_number', '0')
password = sha256(request.POST.get('password'))
# print(UserModel.objects.all())
users = UserModel.objects.filter(phone_number=phone_number)
# print(users)
if users:
if users[0].password == password:
context['title'] = "登录成功"
request.session['phone_number'] = phone_number
user = UserModel.objects.get(phone_number=phone_number)
user.last_login_time = datetime.datetime.now()
user.save()
log = UserLogModel()
log.data_time = datetime.datetime.now()
log.phone_number = request.session.get('phone_number', '')
log.action = '1'
if request.META.get('HTTP_X_FORWARDED_FOR', ''):
ip = request.META['HTTP_X_FORWARDED_FOR']
else:
ip = request.META['REMOTE_ADDR']
log.ip = ip
log.save()
# print(request.POST.get('remember_me', 'off'))
if request.POST.get('remember_me', 'off') == 'on':
request.session.set_expiry(3600*24*7)
# return dashboard(request)
return redirect('/dashboard/')
else:
log = UserLogModel()
log.data_time = datetime.datetime.now()
log.phone_number = request.POST.get('phone_number', '')
log.action = '2'
if request.META.get('HTTP_X_FORWARDED_FOR', ''):
ip = request.META['HTTP_X_FORWARDED_FOR']
else:
ip = request.META['REMOTE_ADDR']
log.ip = ip
log.save()
context['title'] = "密码错误"
else:
log = UserLogModel()
log.data_time = datetime.datetime.now()
log.phone_number = request.POST.get('phone_number', '')
log.action = '2'
if request.META.get('HTTP_X_FORWARDED_FOR', ''):
ip = request.META['HTTP_X_FORWARDED_FOR']
else:
ip = request.META['REMOTE_ADDR']
log.ip = ip
log.save()
context['title'] = "用户不存在"
elif request.session.get('phone_number', ''):
return redirect('/dashboard/')
else:
context['title'] = "欢迎来到实验室用电信息统计平台"
return render(request, 'login_staradmin.html', context=context)
def dashboard(request):
phone_number = request.session.get('phone_number', '')
if phone_number == '':
return redirect('/')
user = UserModel.objects.get(phone_number=phone_number)
time_delta = datetime.datetime.now() - datetime.datetime(2019, 1, 1)
if user.is_admin:
data = DataModel.objects.order_by('data_time')[0:51]
user_number = UserModel.objects.count()
device_number = DeviceModel.objects.count()
data_number = DataModel.objects.count()
if user.device_id:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
else:
device = '管理员设备'
status = True
else:
user_number = UserModel.objects.filter(phone_number=phone_number).count()
device_number = DeviceModel.objects.filter(device_id=user.device_id).count()
data_number = DataModel.objects.filter(device_id=user.device_id).count()
device = DeviceModel.objects.get(device_id=user.device_id)
data = DataModel.objects.filter(device_id=device.device_id).order_by('data_time')[0:51]
status = device.status
status = 'offline' if status is False else 'online'
attr = [str(dat.data_time).split('.')[0] for dat in data]
current_values = [float(dat.current_value) for dat in data]
active_power_values = [float(dat.active_power_value) for dat in data]
total_active_power_values = [float(dat.total_active_power_value) for dat in data]
line = Line("近50条数据", "电流、有功功率和有功总电量")
line.add("电流", attr, current_values, is_fill=True, mark_point=["max", "min"])
line.add("有功功率", attr, active_power_values, mark_point=["max", "min"])
line.add("有功总电量", attr, total_active_power_values, mark_point=["max", "min"])
# if user.is_admin:
# devices = DeviceModel.objects.filter(device_id=user.device_id)
# status = 'offline'
# if devices:
# status = devices[0].status
# else:
# status = 'online'
# user_number = UserModel.objects.count()
# device_number = DeviceModel.objects.count()
# data_number = DataModel.objects.count()
context_data = {
'name': user.name,
'device_id': user.device_id,
'device_status': status,
'instruction': '若系统使用时遇到问题,请及时向系统管理员进行反馈。谢谢大家的配合~',
'user_number': user_number,
'device_number': device_number,
'running_days': time_delta.days,
'data_number': data_number,
'current_peak_value': max([dat.current_value for dat in data]) if len(data) > 0 else 0,
'active_power_peak_value': max([dat.active_power_value for dat in data]) if len(data) > 0 else 0,
'total_active_power_peak_value': max([dat.total_active_power_value for dat in data]) if len(data) > 0 else 0,
'dashboard_chart': line.render_embed(),
}
# else:
# device = DeviceModel.objects.get(device_id=user.device_id)
# data = DataModel.objects.filter(device_id=device.device_id).order_by('data_time')[0:50]
# print(data)
#
# attr = [str(dat.data_time).split('.')[0] for dat in data]
# current_values = [dat.current_value for dat in data]
# active_power_values = [dat.active_power_value for dat in data]
# total_active_power_values = [dat.total_active_power_value for dat in data]
#
# line = Line("近50条数据", "电流、有功功率和有功总电量")
# # line.use_theme('macarons')
# line.add("电流", attr, current_values, is_fill=True, mark_point=["max", "min"])
# line.add("有功功率", attr, active_power_values, mark_point=["max", "min"])
# line.add("有功总电量", attr, total_active_power_values, mark_point=["max", "min"])
#
# context_data = {
# 'name': user.name,
# 'device_id': user.device_id,
# 'device_status': 'online' if device.status == '1' else 'offline',
# 'instruction': '若系统使用时遇到问题,请及时向系统管理员进行反馈。谢谢大家的配合~',
# 'user_number': 1,
# 'device_number': 1,
# 'running_days': time_delta.days,
# 'data_number': len(data),
# 'current_peak_value': max([dat.current_value for dat in data]),
# 'active_power_peak_value': max([dat.active_power_value for dat in data]),
# 'total_active_power_peak_value': max([dat.total_active_power_value for dat in data]),
# 'dashboard_chart': line.render_embed(),
# }
# print(line.get_js_dependencies())
return render(request, 'dashboard_homepage.html', context=context_data)
def user(request):
phone_number = request.session.get('phone_number', '')
if phone_number == '':
return redirect('/')
user = UserModel.objects.get(phone_number=phone_number)
if user.is_admin:
if user.device_id:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
users = UserModel.objects.all()
else:
device = '管理员设备'
status = True
users = UserModel.objects.all()
else:
users = UserModel.objects.filter(phone_number=phone_number)
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
# print(devices)
status = 'offline' if status is False else 'online'
limit = 11
paginator = Paginator(users, limit)
page = request.GET.get('page', 1)
loaded = paginator.page(page)
# print([dat.data_time for dat in devices[0:6]])
context_data = {
'name': user.name,
        'device_id': user.device_id,
'device_status': status,
'users': loaded,
}
return render(request, 'user_base.html', context=context_data)
def log(request):
phone_number = request.session.get('phone_number', '')
if phone_number == '':
return redirect('/')
user = UserModel.objects.get(phone_number=phone_number)
if user.is_admin:
if user.device_id:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
logs = UserLogModel.objects.order_by('-data_time')
else:
device = '管理员设备'
status = True
logs = UserLogModel.objects.order_by('-data_time')
else:
logs = UserLogModel.objects.filter(phone_number=user.phone_number).order_by('-data_time')
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
status = 'offline' if status is False else 'online'
limit = 10
paginator = Paginator(logs, limit)
page = request.GET.get('page', 1)
loaded = paginator.page(page)
# print([dat.data_time for dat in devices[0:6]])
context_data = {
'name': user.name,
        'device_id': getattr(device, 'device_id', device),  # device may be a placeholder string for admins without a device
'device_status': status,
'logs': loaded,
}
return render(request, 'user_log.html', context=context_data)
def new(request):
phone_number = request.session.get('phone_number', '')
if phone_number == '':
return redirect('/')
user = UserModel.objects.get(phone_number=phone_number)
if user.is_admin:
if user.device_id:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
else:
device = '管理员设备'
status = True
else:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
status = 'offline' if status is False else 'online'
context_data = {
'name': user.name,
'device_id': user.device_id,
'device_status': status,
'return_instruction': '',
}
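    # GET shows the empty creation form; POST (admins only) validates the target
    # device and then saves the new user record.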
if request.method == "GET":
return render(request, 'user_new.html', context=context_data)
if not user.is_admin:
context_data['return_instruction'] = "只有管理员才有权限新增用户!"
return render(request, 'user_base.html', context=context_data)
if request.POST.get('phone_number', '') == '':
context_data['return_instruction'] = "请注意,电话号码不能为空"
return render(request, 'user_base.html', context=context_data)
    try:
        # Validate that the requested device exists before creating the user.
        DeviceModel.objects.get(device_id=request.POST.get('device_id', ''))
    except DeviceModel.DoesNotExist:
        context_data['return_instruction'] = "设备号不存在!"
        return render(request, 'user_base.html', context=context_data)
user_new = UserModel()
user_new.phone_number = request.POST.get('phone_number', '')
user_new.device_id = request.POST.get('device_id', '')
user_new.password = sha256(request.POST.get('password', ''))
user_new.name = request.POST.get('name', '')
user_new.class_number = request.POST.get('class_number', '')
user_new.id_number = request.POST.get('id_number', '')
    user_new.is_admin = request.POST.get('is_admin', '') == '是'
user_new.comment = request.POST.get('comment', '')
user_new.date_joined = datetime.datetime.now()
user_new.save()
context_data['return_instruction'] = "保存成功"
return render(request, 'user_new.html', context=context_data)
def edit(request):
phone_number = request.session.get('phone_number', '')
if phone_number == '':
return redirect('/')
user = UserModel.objects.get(phone_number=phone_number)
if user.is_admin:
if user.device_id:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
else:
device = '管理员设备'
status = True
else:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
status = 'offline' if status is False else 'online'
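    # GET pre-fills the edit form with the selected user's current values;
    # POST applies the submitted changes.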
if request.method == "GET":
edit_user = UserModel.objects.get(phone_number=request.GET.get('phone_number', ''))
context_data = {
'name': user.name,
'device_id': user.device_id,
'device_status': status,
'return_instruction': '',
'edit_phone_number': request.GET.get('phone_number', ''),
'edit_device_id': edit_user.device_id,
'edit_name': edit_user.name,
'edit_class_number': edit_user.class_number,
'edit_id_number': edit_user.id_number,
'edit_comment': edit_user.comment,
}
if not user.is_admin:
context_data['return_instruction'] = "只有管理员才有权限编辑用户!"
return render(request, 'user_base.html', context=context_data)
else:
return render(request, 'user_edit.html', context=context_data)
    if request.method == "POST":
        edit_user = UserModel.objects.get(phone_number=request.POST.get('edit_phone_number', ''))
        if request.POST.get('device_id', ''):
            try:
                # Validate that the requested device exists before reassigning it.
                DeviceModel.objects.get(device_id=request.POST.get('device_id', ''))
            except DeviceModel.DoesNotExist:
                context_data = {
                    'name': user.name,
                    'device_id': user.device_id,
                    'device_status': status,
                    'return_instruction': "设备不存在!",
                }
                return render(request, 'user_edit.html', context=context_data)
edit_user.device_id = request.POST.get('device_id', '')
edit_user.name = request.POST.get('name', '')
edit_user.class_number = request.POST.get('class_number', '')
edit_user.id_number = request.POST.get('id_number', '')
        edit_user.is_admin = request.POST.get('is_admin', '') == '是'
edit_user.comment = request.POST.get('comment', '')
if request.POST.get('password', ''):
edit_user.password = sha256(request.POST.get('password', ''))
edit_user.save()
        context_data = {
            'name': user.name,
            'device_id': user.device_id,
            'device_status': status,
            'return_instruction': "编辑成功",
            'edit_phone_number': edit_user.phone_number,
            'edit_device_id': edit_user.device_id,
            'edit_name': edit_user.name,
            'edit_class_number': edit_user.class_number,
            'edit_id_number': edit_user.id_number,
            'edit_comment': edit_user.comment,
        }
        return render(request, 'user_edit.html', context=context_data)
def delete(request):
phone_number = request.session.get('phone_number', '')
if phone_number == '':
return redirect('/')
user = UserModel.objects.get(phone_number=phone_number)
if user.is_admin:
if user.device_id:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
else:
device = '管理员设备'
status = True
else:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
status = 'offline' if status is False else 'online'
context_data = {
'name': user.name,
'device_id': user.device_id,
'device_status': status,
'return_instruction': '',
}
if not user.is_admin:
context_data['return_instruction'] = "只有管理员才有权限删除设备!"
return render(request, 'user_base.html', context=context_data)
if request.GET.get('phone_number', '') == '':
context_data['return_instruction'] = "请注意,手机号不能为空"
return render(request, 'user_base.html', context=context_data)
UserModel.objects.filter(phone_number=request.GET.get('phone_number', '')).delete()
context_data['return_instruction'] = "删除成功"
# print('删除成功')
return redirect('/user/user/')
def search(request):
phone_number = request.session.get('phone_number', '')
if phone_number == '':
return redirect('/')
user = UserModel.objects.get(phone_number=phone_number)
if user.is_admin:
if user.device_id:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
users = UserModel.objects.filter(phone_number=request.GET.get('phone_number', ''))
else:
device = '管理员设备'
status = True
users = UserModel.objects.filter(phone_number=request.GET.get('phone_number', ''))
else:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
if request.GET.get('phone_number', '') != user.phone_number:
users = []
else:
users = UserModel.objects.filter(phone_number=user.phone_number)
# print(devices)
status = 'offline' if status is False else 'online'
limit = 11
paginator = Paginator(users, limit)
page = request.GET.get('page', 1)
loaded = paginator.page(page)
# print([dat.data_time for dat in devices[0:6]])
context_data = {
'name': user.name,
        'device_id': getattr(device, 'device_id', device),  # device may be a placeholder string for admins without a device
'device_status': status,
'users': loaded,
}
return render(request, 'user_base.html', context=context_data)
def log_search(request):
    # Require a logged-in session before touching the database, then build the
    # log filter from the requested action and phone number.
    phone_number = request.session.get('phone_number', '')
    if phone_number == '':
        return redirect('/')
    user = UserModel.objects.get(phone_number=phone_number)
    req_action = request.GET.get('action', '')
    req_phone_number = request.GET.get('phone_number', '')
    if req_phone_number == '':
        return redirect('/')
    filter_terms = {'phone_number': req_phone_number}
    if req_action:
        filter_terms['action'] = req_action
if user.is_admin:
if user.device_id:
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
logs = UserLogModel.objects.filter(**filter_terms).order_by('-data_time')
else:
device = '管理员设备'
status = True
logs = UserLogModel.objects.filter(**filter_terms).order_by('-data_time')
else:
logs = UserLogModel.objects.filter(phone_number=user.phone_number).order_by('-data_time')
device = DeviceModel.objects.get(device_id=user.device_id)
status = device.status
if req_phone_number != user.phone_number:
logs = []
context_data = {
'name': user.name,
'device_id': device.device_id,
'device_status': status,
'logs': logs,
'return_instruction': '您无权查看其它用户的日志',
}
return render(request, 'user_log.html', context=context_data)
status = 'offline' if status is False else 'online'
limit = 10
paginator = Paginator(logs, limit)
page = request.GET.get('page', 1)
loaded = paginator.page(page)
# print([dat.data_time for dat in devices[0:6]])
context_data = {
'name': user.name,
        'device_id': getattr(device, 'device_id', device),  # device may be a placeholder string for admins without a device
'device_status': status,
'logs': loaded,
}
return render(request, 'user_log.html', context=context_data)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2019.2
# Email : muyanru345@163.com
###################################################################
"""
MDivider
"""
import six
from dayu_widgets.label import MLabel
from dayu_widgets.qt import QWidget, Qt, QFrame, QHBoxLayout, Property
class MDivider(QWidget):
'''
A divider line separates different content.
Property:
dayu_text: six.string_types
'''
_alignment_map = {
Qt.AlignCenter: 50,
Qt.AlignLeft: 20,
Qt.AlignRight: 80,
}
def __init__(self, text='', orientation=Qt.Horizontal, alignment=Qt.AlignCenter, parent=None):
super(MDivider, self).__init__(parent)
self._orient = orientation
self._text_label = MLabel().secondary()
self._left_frame = QFrame()
self._right_frame = QFrame()
self._main_lay = QHBoxLayout()
self._main_lay.setContentsMargins(0, 0, 0, 0)
self._main_lay.setSpacing(0)
self._main_lay.addWidget(self._left_frame)
self._main_lay.addWidget(self._text_label)
self._main_lay.addWidget(self._right_frame)
self.setLayout(self._main_lay)
if orientation == Qt.Horizontal:
self._left_frame.setFrameShape(QFrame.HLine)
self._left_frame.setFrameShadow(QFrame.Sunken)
self._right_frame.setFrameShape(QFrame.HLine)
self._right_frame.setFrameShadow(QFrame.Sunken)
else:
self._text_label.setVisible(False)
self._right_frame.setVisible(False)
self._left_frame.setFrameShape(QFrame.VLine)
self._left_frame.setFrameShadow(QFrame.Plain)
self.setFixedWidth(2)
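        # Position the text by splitting the layout width between the two frames
        # in the ratio given by _alignment_map (e.g. 20/80 for AlignLeft, 50/50 for AlignCenter).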
self._main_lay.setStretchFactor(self._left_frame,
self._alignment_map.get(alignment, 50))
self._main_lay.setStretchFactor(self._right_frame,
100 - self._alignment_map.get(alignment, 50))
self._text = None
self.set_dayu_text(text)
def set_dayu_text(self, value):
"""
Set the divider's text.
        When the text is empty, the text label and right frame are hidden so the divider shows no gap.
:param value: six.string_types
:return: None
"""
self._text = value
self._text_label.setText(value)
if self._orient == Qt.Horizontal:
self._text_label.setVisible(bool(value))
self._right_frame.setVisible(bool(value))
def get_dayu_text(self):
"""
Get current text
:return: six.string_types
"""
return self._text
dayu_text = Property(six.string_types[0], get_dayu_text, set_dayu_text)
@classmethod
def left(cls, text=''):
"""Create a horizontal divider with text at left."""
return cls(text, alignment=Qt.AlignLeft)
@classmethod
def right(cls, text=''):
"""Create a horizontal divider with text at right."""
return cls(text, alignment=Qt.AlignRight)
@classmethod
def center(cls, text=''):
"""Create a horizontal divider with text at center."""
return cls(text, alignment=Qt.AlignCenter)
@classmethod
def vertical(cls):
"""Create a vertical divider"""
return cls(orientation=Qt.Vertical)
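

# A minimal usage sketch (illustrative only; it assumes a QApplication has been
# created via dayu_widgets.qt before any widgets are instantiated):
#
#   div_center = MDivider('Settings')      # horizontal rule with centered text
#   div_left = MDivider.left('Details')    # text aligned to the left
#   div_plain = MDivider()                 # rule with no text (label hidden)
#   div_vertical = MDivider.vertical()     # thin vertical rule
#   div_center.set_dayu_text('Updated')    # text can also be set via the dayu_text property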
|
"""Seawater-ice equilibrium functions.
This module provides thermodynamic functions for seawater in equilibrium
with ice (sea-ice), e.g. the enthalpy of melting. It also provides a
Gibbs free energy function for sea-ice parcels, with primary variables
being the total salinity (mass of salt per mass of salt, liquid, and
ice), temperature, and pressure.
:Examples:
>>> temperature(salt=0.035,pres=1e5)
271.240373585159
>>> enthalpymelt(salt=0.035,pres=1e5)
329942.976285
>>> volumemelt(salt=0.035,pres=1e5)
-9.10140854473e-5
>>> pressure(salt=0.035,temp=270.)
16132047.4385
>>> enthalpymelt(salt=0.035,temp=270.)
326829.393605
>>> volumemelt(salt=0.035,temp=270.)
-9.67135426848e-5
>>> salinity(temp=270.,pres=1e5)
0.05602641503
>>> enthalpymelt(temp=270.,pres=1e5)
328249.119579
>>> volumemelt(temp=270.,pres=1e5)
-9.18186917900e-5
>>> brinesalinity(270.,1e5)
0.05602641503
>>> meltingpressure(0.035,270.)
16132047.4385
>>> freezingtemperature(0.035,1e5)
271.240373585
>>> dtfdp(0.035,1e5)
7.48210942879e-8
>>> dtfds(0.035,1e5)
-56.8751336296
>>> seaice_g(0,0,0,0.035,270.,1e5)
-414.0175745
>>> seaice_g(0,1,0,0.035,270.,1e5)
500.445444181
>>> seaice_g(0,1,1,0.035,270.,1e5)
-1.658664467e-05
>>> brinefraction(0.035,270.,1e5)
0.6247053284
>>> cp(0.035,270.,1e5)
62868.90151
>>> density(0.035,270.,1e5)
993.156434117
>>> enthalpy(0.035,270.,1e5)
-135534.287504
>>> entropy(0.035,270.,1e5)
-500.445444181
>>> expansion(0.035,270.,1e5)
-1.647313287e-02
>>> kappa_t(0.035,270.,1e5)
1.56513441348e-9
:Functions:
* :func:`eq_stp`: Calculate primary variables for sea-ice at any two of
the seawater salinity, temperature, and pressure.
* :func:`densityice`: Sea-ice ice density.
* :func:`densitysea`: Sea-ice seawater density.
* :func:`enthalpyice`: Sea-ice ice enthalpy.
* :func:`enthalpysea`: Sea-ice seawater enthalpy.
* :func:`entropyice`: Sea-ice ice entropy.
* :func:`entropysea`: Sea-ice seawater entropy.
* :func:`pressure`: Sea-ice pressure.
* :func:`temperature`: Sea-ice temperature.
* :func:`salinity`: Sea-ice salinity.
* :func:`enthalpymelt`: Enthalpy of melting.
* :func:`volumemelt`: Specific volume of melting.
* :func:`brinesalinity`: Salinity of seawater in equilibrium with ice.
* :func:`meltingpressure`: Pressure of seawater in equilibrium with ice.
* :func:`freezingtemperature`: Temperature of seawater in equilibrium
with ice.
* :func:`dtfdp`: Freezing point depression of seawater due to pressure.
* :func:`dtfds`: Freezing point depression of seawater due to salinity.
* :func:`eq_seaice`: Calculate primary variables for a sea-ice parcel at
the given total salinity, temperature, and pressure.
* :func:`seaice_g`: Sea-ice Gibbs free energy with derivatives.
* :func:`brinefraction`: Sea-ice seawater mass fraction.
* :func:`cp`: Sea-ice isobaric heat capacity.
* :func:`density`: Sea-ice total density.
* :func:`enthalpy`: Sea-ice specific enthalpy.
* :func:`entropy`: Sea-ice specific entropy.
* :func:`expansion`: Sea-ice thermal expansion coefficient.
* :func:`kappa_t`: Sea-ice isothermal compressibility.
"""
__all__ = ['eq_stp','densityice','densitysea','enthalpyice','enthalpysea',
'entropyice','entropysea','pressure','temperature','salinity',
'enthalpymelt','volumemelt',
'brinesalinity','meltingpressure','freezingtemperature','dtfdp','dtfds',
'eq_seaice','seaice_g','brinefraction','cp','density','enthalpy','entropy',
'expansion','kappa_t']
import warnings
import numpy
from teospy import constants0
from teospy import flu1
from teospy import ice1
from teospy import flu2
from teospy import ice2
from teospy import sal2
from teospy import maths3
from teospy import flu3a
from teospy import sea3a
from teospy import maths4
_CHKTOL = constants0.CHKTOL
_MSAL = constants0.MSAL
_RUNIV = constants0.RUNIV
_RWAT = constants0.RWAT
_TTP = constants0.TTP
_PTPE = constants0.PTPE
_DLTP = constants0.DLTP
_DITP = constants0.DITP
_LILTP = constants0.LILTP
_CLIQ = constants0.CLIQ
_CICE = constants0.CICE
_SAL0 = constants0.SAL0
_RSAL = _RUNIV / _MSAL
_VLTP = _DLTP**(-1)
_VITP = _DITP**(-1)
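# Empirical constants used by the closed-form initial guess in _approx_sp.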
_C_SP = 0.396166676603
_E = numpy.exp(1)
_chkflubnds = constants0.chkflubnds
_chkicebnds = constants0.chkicebnds
_chksalbnds = constants0.chksalbnds
_flu_f = flu1.flu_f
_ice_g = ice1.ice_g
_eq_pressure = flu2.eq_pressure
_eq_chempot = flu2.eq_chempot
_sal_g = sal2.sal_g
_eq_liqpot = sal2.eq_liqpot
_newton = maths3.newton
_dliq_default = flu3a._dliq_default
## Equilibrium functions
def _approx_st(salt,temp):
"""Approximate PDl at ST.
Approximate the pressure and liquid water density of sea-ice with
the given salinity and temperature.
:arg float salt: Salinity in kg/kg.
:arg float temp: Temperature in K.
:returns: Pressure and liquid water density (both in SI units).
"""
dmu = ((_CLIQ-_CICE)*(temp - _TTP - temp*numpy.log(temp/_TTP))
+ -_LILTP/_TTP*(temp - _TTP) - _RSAL*temp*salt)
pres = _PTPE + dmu/(_VITP-_VLTP)
dliq = _dliq_default(temp,pres)
return pres, dliq
def _approx_sp(salt,pres):
"""Approximate TDl at SP.
Approximate the temperature and liquid water density of sea-ice with
the given salinity and pressure.
:arg float salt: Salinity in kg/kg.
:arg float pres: Pressure in Pa.
:returns: Temperature and liquid water density (both in SI units).
"""
CDIF = _CLIQ-_CICE
R0 = _LILTP/_TTP / CDIF
r1 = (pres-_PTPE) * (_VITP-_VLTP)/_TTP / CDIF
r2 = _RSAL*salt / CDIF
w = -(1 - R0 + r1) * numpy.exp(-(1 - R0 - r2))
negz = 1 - (1 + _E*w)**_C_SP
temp = (1 - R0 + r1)*_TTP/negz
dliq = _dliq_default(temp,pres)
return temp, dliq
def _approx_sp2(salt,pres):
"""Approximate TDl at SP.
Approximate the temperature and liquid water density of sea-ice with
the given salinity and pressure.
:arg float salt: Salinity in kg/kg.
:arg float pres: Pressure in Pa.
:returns: Temperature and liquid water density (both in SI units).
"""
x = (_RSAL*_TTP*salt + (pres-_PTPE)*(_VITP-_VLTP)) / _LILTP
temp = _TTP * (1-x)
dliq = _dliq_default(temp,pres)
return temp, dliq
def _approx_tp(temp,pres,dliq):
"""Approximate S at TP.
Approximate the salinity of sea-ice with the given temperature and
pressure.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg float dliq: Liquid water density in kg/m3 (unused).
:returns: Salinity in kg/kg.
"""
dmu = ((_CLIQ-_CICE) * (temp-_TTP-temp*numpy.log(temp/_TTP))
+ -_LILTP/_TTP*(temp-_TTP) - (pres-_PTPE)*(_VITP-_VLTP))
salt = dmu / (_RSAL*temp)
return salt
def _diff_st(p,dl,salt,temp,useext=False):
"""Calculate sea-ice disequilibrium at ST.
Calculate both sides of the equations
given pressure = pressure of liquid water
chemical potential of ice = potential of liquid water
and their Jacobians with respect to pressure and liquid water
density. Solving these equations gives equilibrium values at the
given salinity and temperature.
:arg float p: Pressure in Pa.
:arg float dl: Liquid water density in kg/m3.
:arg float salt: Salinity in kg/kg.
:arg float temp: Temperature in K.
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:returns: Left-hand side of the equation, right-hand side,
Jacobian of LHS, and Jacobian of RHS.
:rtype: tuple(array(float))
"""
pl = _eq_pressure(0,0,temp,dl)
gi = _ice_g(0,0,temp,p)
gl = _eq_chempot(0,0,temp,dl)
gl += _eq_liqpot(0,0,0,salt,temp,p,useext=useext)
lhs = numpy.array([p, gi])
rhs = numpy.array([pl, gl])
pl_d = _eq_pressure(0,1,temp,dl)
gi_p = _ice_g(0,1,temp,p)
gl_d = _eq_chempot(0,1,temp,dl)
gl_p = _eq_liqpot(0,0,1,salt,temp,p,useext=useext)
dlhs = numpy.array([[1.,0.], [gi_p,0.]])
drhs = numpy.array([[0.,pl_d], [gl_p,gl_d]])
return lhs, rhs, dlhs, drhs
def _diff_sp(t,dl,salt,pres,useext=False):
"""Calculate sea-ice disequilibrium at SP.
Calculate both sides of the equations
given pressure = pressure of liquid water
chemical potential of ice = potential of liquid water
and their Jacobians with respect to temperature and liquid water
density. Solving these equations gives equilibrium values at the
given salinity and pressure.
:arg float t: Temperature in K.
:arg float dl: Liquid water density in kg/m3.
:arg float salt: Salinity in kg/kg.
:arg float pres: Pressure in Pa.
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:returns: Left-hand side of the equation, right-hand side,
Jacobian of LHS, and Jacobian of RHS.
:rtype: tuple(array(float))
"""
pl = _eq_pressure(0,0,t,dl)
gi = _ice_g(0,0,t,pres)
gl = _eq_chempot(0,0,t,dl)
gl += _eq_liqpot(0,0,0,salt,t,pres,useext=useext)
lhs = numpy.array([pres, gi])
rhs = numpy.array([pl, gl])
pl_t = _eq_pressure(1,0,t,dl)
pl_d = _eq_pressure(0,1,t,dl)
gi_t = _ice_g(1,0,t,pres)
gl_t = _eq_chempot(1,0,t,dl)
gl_t += _eq_liqpot(0,1,0,salt,t,pres,useext=useext)
gl_d = _eq_chempot(0,1,t,dl)
dlhs = numpy.array([[0.,0.], [gi_t,0.]])
drhs = numpy.array([[pl_t,pl_d], [gl_t,gl_d]])
return lhs, rhs, dlhs, drhs
def _diff_tp(s,temp,pres,dliq,useext=False):
"""Calculate sea-ice disequilibrium at TP.
Calculate both sides of the equation
chemical potential of ice = potential of liquid water
and their derivatives with respect to salinity. Solving these
equations gives the equilibrium salinity at the given temperature
and pressure.
:arg float s: Salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg float dliq: Liquid water density in kg/m3.
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:returns: Left-hand side of the equation, right-hand side,
derivative of LHS, and derivative of RHS.
:rtype: tuple(float)
"""
gi = _ice_g(0,0,temp,pres)
gl = _eq_chempot(0,0,temp,dliq)
gl += _eq_liqpot(0,0,0,s,temp,pres,useext=useext)
lhs = gi
rhs = gl
gl_s = _eq_liqpot(1,0,0,s,temp,pres,useext=useext)
dlhs = 0.
drhs = gl_s
return lhs, rhs, dlhs, drhs
def eq_stp(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,chkbnd=False,
useext=False,mathargs=None):
"""Get primary sea-ice variables at STP.
Get the values of all primary variables for sea-ice in equilibrium.
At least two of the salinity, temperature, and pressure must be
provided.
If the calculation has already been done, the results can be passed
to avoid unnecessary repeat calculations. If enough values are
passed, they will be checked for consistency if chkvals is True.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Salinity, temperature, pressure, and seawater liquid
density (all in SI units).
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
"""
if sum(val is None for val in (salt,temp,pres)) > 1:
errmsg = 'Must provide at least two of (salt,temp,pres)'
raise ValueError(errmsg)
if mathargs is None:
mathargs = dict()
fkwargs = {'useext': useext}
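    # Dispatch on the unknown variable: a 1-D solve for salinity at fixed
    # (temp,pres), or a coupled Newton iteration for (temp,dliq) or (pres,dliq).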
if salt is None:
dliq = flu3a.eq_tp_liq(temp,pres,dliq=dliq,dliq0=dliq0,
mathargs=mathargs)
fargs = (temp,pres,dliq)
salt = _newton(_diff_tp,salt0,_approx_tp,fargs=fargs,fkwargs=fkwargs,
**mathargs)
elif temp is None:
x0 = (temp0,dliq0)
fargs = (salt,pres)
x1 = _newton(_diff_sp,x0,_approx_sp,fargs=fargs,fkwargs=fkwargs,
**mathargs)
temp, dliq = x1
elif pres is None:
x0 = (pres0,dliq0)
fargs = (salt,temp)
x1 = _newton(_diff_st,x0,_approx_st,fargs=fargs,fkwargs=fkwargs,
**mathargs)
pres, dliq = x1
elif dliq is None:
dliq = flu3a.eq_tp_liq(temp,pres,dliq0=dliq0,mathargs=mathargs)
_chkflubnds(temp,dliq,chkbnd=chkbnd)
_chkicebnds(temp,pres,chkbnd=chkbnd)
_chksalbnds(salt,temp,pres,chkbnd=chkbnd)
if not chkvals:
return salt, temp, pres, dliq
lhs, rhs, __, __ = _diff_st(pres,dliq,salt,temp,useext=useext)
errs = list()
for (l,r) in zip(lhs,rhs):
if abs(r) >= chktol:
errs.append(abs(l/r-1))
else:
errs.append(abs(l-r))
if max(errs) > chktol:
warnmsg = ('Given values {0} and solutions {1} disagree to more than '
'the tolerance {2}').format(lhs,rhs,chktol)
warnings.warn(warnmsg,RuntimeWarning)
return salt, temp, pres, dliq
## Thermodynamic functions
def densityice(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice ice density.
Calculate the density of ice in sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Density in kg/m3.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> densityice(salt=0.035,pres=1e5)
917.000739687
>>> densityice(salt=0.035,temp=270.)
918.898527655
>>> densityice(temp=270.,pres=1e5)
917.181167192
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
dice = ice2.density(temp,pres)
return dice
def densitysea(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice seawater density.
Calculate the density of seawater in sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Density in kg/m3.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> densitysea(salt=0.035,pres=1e5)
1028.05199645
>>> densitysea(salt=0.035,temp=270.)
1035.73670169
>>> densitysea(temp=270.,pres=1e5)
1045.16805918
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
dsea = sea3a.density(salt,temp,pres,dliq=dliq,useext=useext)
return dsea
def enthalpyice(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice ice enthalpy.
Calculate the specific enthalpy of ice in sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Enthalpy in J/kg.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> enthalpyice(salt=0.035,pres=1e5)
-337351.999358
>>> enthalpyice(salt=0.035,temp=270.)
-323205.968289
>>> enthalpyice(temp=270.,pres=1e5)
-339929.555499
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
hice = ice2.enthalpy(temp,pres)
return hice
def enthalpysea(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice seawater enthalpy.
Calculate the specific enthalpy of seawater in sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Enthalpy in J/kg.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> enthalpysea(salt=0.035,pres=1e5)
-7613.193379
>>> enthalpysea(salt=0.035,temp=270.)
2832.949104
>>> enthalpysea(temp=270.,pres=1e5)
-12742.86649
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
hsea = sea3a.enthalpy(salt,temp,pres,dliq=dliq,useext=useext)
return hsea
def entropyice(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice ice entropy.
Calculate the specific entropy of ice in sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Entropy in J/kg/K.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> entropyice(salt=0.035,pres=1e5)
-1235.44872812
>>> entropyice(salt=0.035,temp=270.)
-1247.71314646
>>> entropyice(temp=270.,pres=1e5)
-1244.97335506
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
sice = ice2.entropy(temp,pres)
return sice
def entropysea(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice seawater entropy.
Calculate the specific entropy of seawater in sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Entropy in J/kg/K.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> entropysea(salt=0.035,pres=1e5)
-27.9264598103
>>> entropysea(salt=0.035,temp=270.)
-46.7361169560
>>> entropysea(temp=270.,pres=1e5)
-53.1667911144
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
ssea = sea3a.entropy(salt,temp,pres,dliq=dliq,useext=useext)
return ssea
def pressure(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice pressure.
Calculate the pressure of sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Pressure in Pa.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> pressure(salt=0.035,temp=270.)
16132047.4385
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
return pres
def temperature(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice temperature.
Calculate the temperature of sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Temperature in K.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> temperature(salt=0.035,pres=1e5)
271.240373585159
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
return temp
def salinity(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice salinity.
Calculate the salinity of sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Salinity in kg/kg.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> salinity(temp=270.,pres=1e5)
0.05602641503
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
return salt
def enthalpymelt(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate the enthalpy of melting.
Calculate the specific enthalpy of melting of sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Enthalpy in J/kg.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> enthalpymelt(salt=0.035,pres=1e5)
329942.976285
>>> enthalpymelt(salt=0.035,temp=270.)
326829.393605
>>> enthalpymelt(temp=270.,pres=1e5)
328249.119579
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
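    # Enthalpy of melting = T*(s_w - s_ice), where s_w = -(f_T + g^S_T - S*g^S_ST)
    # is the partial specific entropy of water in seawater and s_ice = -g^Ih_T.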
fl_t = _flu_f(1,0,temp,dliq)
gs_t = _sal_g(0,1,0,salt,temp,pres,useext=useext)
gs_st = _sal_g(1,1,0,salt,temp,pres,useext=useext)
gi_t = _ice_g(1,0,temp,pres)
hmelt = temp * (gi_t - (fl_t + gs_t - salt*gs_st))
return hmelt
def volumemelt(salt=None,temp=None,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate the volume of melting.
Calculate the specific volume of melting of sea-ice.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Specific volume in m3/kg.
:raises ValueError: If fewer than two of salt, temp, and pres are
provided.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> volumemelt(salt=0.035,pres=1e5)
-9.10140854473e-5
>>> volumemelt(salt=0.035,temp=270.)
-9.67135426848e-5
>>> volumemelt(temp=270.,pres=1e5)
-9.18186917900e-5
"""
salt, temp, pres, dliq = eq_stp(salt=salt,temp=temp,pres=pres,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,temp0=temp0,pres0=pres0,
dliq0=dliq0,chkbnd=chkbnd,useext=useext,mathargs=mathargs)
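    # Volume of melting = partial specific volume of water in seawater
    # (1/dliq + g^S_p - S*g^S_Sp) minus the specific volume of ice (g^Ih_p).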
gs_p = _sal_g(0,0,1,salt,temp,pres,useext=useext)
gs_sp = _sal_g(1,0,1,salt,temp,pres,useext=useext)
gi_p = _ice_g(0,1,temp,pres)
vmelt = dliq**(-1) + gs_p - salt*gs_sp - gi_p
return vmelt
## Thermodynamic functions of two variables
def brinesalinity(temp,pres,salt=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,temp0=None,pres0=None,dliq0=None,
chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice brine salinity.
Calculate the salinity of seawater (brine) in equilibrium with ice
of the given temperature and pressure.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Salinity in kg/kg. If unknown, pass None (default) and it
will be calculated.
:type salt: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the salinity in kg/kg. If None
(default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Salinity in kg/kg.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> brinesalinity(270.,1e5)
0.05602641503
"""
salt, __, __, dliq = eq_stp(temp=temp,pres=pres,salt=salt,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,
useext=useext,mathargs=mathargs)
return salt
def meltingpressure(salt,temp,pres=None,dliq=None,chkvals=False,
chktol=_CHKTOL,pres0=None,dliq0=None,chkbnd=False,useext=False,
mathargs=None):
"""Calculate sea-ice melting pressure.
Calculate the pressure required to melt ice into seawater at the
given salinity and temperature.
:arg float salt: Salinity in kg/kg.
:arg float temp: Temperature in K.
:arg pres: Pressure in Pa. If unknown, pass None (default) and it
will be calculated.
:type pres: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg pres0: Initial guess for the pressure in Pa. If None (default)
then `_approx_st` is used.
:type pres0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Pressure in Pa.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> meltingpressure(0.035,270.)
16132047.4385
"""
__, __, pres, dliq = eq_stp(temp=temp,pres=pres,salt=salt,dliq=dliq,
chkvals=chkvals,chktol=chktol,pres0=pres0,dliq0=dliq0,chkbnd=chkbnd,
useext=useext,mathargs=mathargs)
return pres
def freezingtemperature(salt,pres,temp=None,dliq=None,chkvals=False,
chktol=_CHKTOL,temp0=None,dliq0=None,chkbnd=False,useext=False,
mathargs=None):
"""Calculate sea-ice freezing temperature.
Calculate the temperature required to freeze seawater at the given
salinity and pressure.
:arg float salt: Salinity in kg/kg.
:arg float pres: Pressure in Pa.
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Temperature in K.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> freezingtemperature(0.035,1e5)
271.240373585
"""
__, temp, __, dliq = eq_stp(temp=temp,pres=pres,salt=salt,dliq=dliq,
chkvals=chkvals,chktol=chktol,temp0=temp0,dliq0=dliq0,chkbnd=chkbnd,
useext=useext,mathargs=mathargs)
return temp
def dtfdp(salt,pres,temp=None,dliq=None,chkvals=False,chktol=_CHKTOL,
temp0=None,dliq0=None,chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice freezing point pressure lowering.
Calculate the effect of pressure on lowering the freezing point of
sea-ice.
:arg float salt: Salinity in kg/kg.
:arg float pres: Pressure in Pa.
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Freezing point lowering in K/Pa.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> dtfdp(0.035,1e5)
7.48210942879e-8
"""
__, temp, __, dliq = eq_stp(temp=temp,pres=pres,salt=salt,dliq=dliq,
chkvals=chkvals,chktol=chktol,temp0=temp0,dliq0=dliq0,chkbnd=chkbnd,
useext=useext,mathargs=mathargs)
fl_t = _flu_f(1,0,temp,dliq)
gs_t = _sal_g(0,1,0,salt,temp,pres,useext=useext)
gs_p = _sal_g(0,0,1,salt,temp,pres,useext=useext)
gs_st = _sal_g(1,1,0,salt,temp,pres,useext=useext)
gs_sp = _sal_g(1,0,1,salt,temp,pres,useext=useext)
gi_t = _ice_g(1,0,temp,pres)
gi_p = _ice_g(0,1,temp,pres)
dent = fl_t + gs_t - salt*gs_st - gi_t
dvol = dliq**(-1) + gs_p - salt*gs_sp - gi_p
dtfdp = dvol/dent
return dtfdp
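# Note: dtfdp above is the Clausius-Clapeyron form of the pressure effect,
# dT_f/dp = dvol/dent, where dvol matches the melting volume computed by
# volumemelt and dent is the corresponding melting entropy difference.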
def dtfds(salt,pres,temp=None,dliq=None,chkvals=False,chktol=_CHKTOL,
temp0=None,dliq0=None,chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice freezing point salt lowering.
Calculate the effect of salinity on lowering the freezing point of
sea-ice.
:arg float salt: Salinity in kg/kg.
:arg float pres: Pressure in Pa.
:arg temp: Temperature in K. If unknown, pass None (default) and it
will be calculated.
:type temp: float or None
:arg dliq: Density of liquid water in seawater in kg/m3. If
unknown, pass None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg temp0: Initial guess for the temperature in K. If None
(default) then `_approx_sp` is used.
:type temp0: float or None
:arg dliq0: Initial guess for the liquid water density in kg/m3. If
None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Freezing point lowering in K/(kg/kg).
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:Examples:
>>> dtfds(0.035,1e5)
-56.8751336296
"""
__, temp, __, dliq = eq_stp(temp=temp,pres=pres,salt=salt,dliq=dliq,
chkvals=chkvals,chktol=chktol,temp0=temp0,dliq0=dliq0,chkbnd=chkbnd,
useext=useext,mathargs=mathargs)
fl_t = _flu_f(1,0,temp,dliq)
gs_t = _sal_g(0,1,0,salt,temp,pres,useext=useext)
gs_ss = _sal_g(2,0,0,salt,temp,pres,useext=useext)
gs_st = _sal_g(1,1,0,salt,temp,pres,useext=useext)
gi_t = _ice_g(1,0,temp,pres)
dent = fl_t + gs_t - salt*gs_st - gi_t
dtfds = salt*gs_ss / dent
return dtfds
## Seawater-ice combined system
def eq_seaice(sisal,temp,pres,salt=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,dliq0=None,chkbnd=False,useext=False,
mathargs=None):
"""Get primary sea-ice variables at SsiTP.
Get the values of all primary variables for a seawater-ice parcel at
the given total salinity, temperature, and pressure. Total salinity
here is the ratio of the mass of salt to the total parcel mass (salt
+ liquid water + ice).
If the calculation has already been done, the results can be passed
to avoid unnecessary repeat calculations. If enough values are
passed, they will be checked for consistency if chkvals is True.
:arg float sisal: Total sea-ice salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Seawater salinity and liquid water density (both in SI
units).
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
"""
if salt is None or dliq is None:
salt, __, __, dliq = eq_stp(temp=temp,pres=pres,salt=salt,dliq=dliq,
chkvals=chkvals,chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,
useext=useext,mathargs=mathargs)
if salt < sisal:
warnmsg = ('Equilibrium salinity {0} is lower than the total parcel '
'salinity {1}').format(salt,sisal)
warnings.warn(warnmsg,RuntimeWarning)
salt = sisal
return salt, dliq
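# Usage sketch (values taken from the doctests in this module): for a parcel
# with total salinity 0.035 kg/kg at 270 K and 1e5 Pa, eq_seaice returns the
# equilibrium brine salinity (~0.056, cf. brinesalinity) and the brine liquid
# water density; the brine mass fraction is then sisal/salt (~0.625, cf.
# brinefraction below).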
def seaice_g(drvs,drvt,drvp,sisal,temp,pres,salt=None,dliq=None,
chkvals=False,chktol=_CHKTOL,salt0=None,dliq0=None,chkbnd=False,
useext=False,mathargs=None):
"""Calculate sea-ice Gibbs free energy with derivatives.
Calculate the specific Gibbs free energy of a sea-ice parcel or its
derivatives with respect to total salinity, temperature, and
pressure.
:arg int drvs: Number of total salinity derivatives.
:arg int drvt: Number of temperature derivatives.
:arg int drvp: Number of pressure derivatives.
:arg float sisal: Total sea-ice salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Gibbs free energy in units of
(J/kg) / (kg/kg)^drvs / K^drvt / Pa^drvp.
:raises ValueError: If any of (drvs,drvt,drvp) are negative or if
(drvs+drvt+drvp) > 2.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> seaice_g(0,0,0,0.035,270.,1e5)
-414.0175745
>>> seaice_g(1,0,0,0.035,270.,1e5)
96363.77305
>>> seaice_g(0,1,0,0.035,270.,1e5)
500.445444181
>>> seaice_g(0,0,1,0.035,270.,1e5)
1.00689072300e-3
>>> seaice_g(2,0,0,0.035,270.,1e5)
0.
>>> seaice_g(1,1,0,0.035,270.,1e5)
-21272.2260252
>>> seaice_g(1,0,1,0.035,270.,1e5)
-2.383040378e-03
>>> seaice_g(0,2,0,0.035,270.,1e5)
-232.847783380
>>> seaice_g(0,1,1,0.035,270.,1e5)
-1.658664467e-05
>>> seaice_g(0,0,2,0.035,270.,1e5)
-1.57591932118e-12
"""
drvtup = (drvs,drvt,drvp)
if any(drv < 0 for drv in drvtup) or sum(drvtup) > 2:
errmsg = 'Derivatives {0} not recognized'.format(drvtup)
raise ValueError(errmsg)
salt, dliq = eq_seaice(sisal,temp,pres,salt=salt,dliq=dliq,chkvals=chkvals,
chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,useext=useext,
mathargs=mathargs)
seaf = sisal/salt
# Straightforward derivatives
if (drvs,drvt,drvp) == (0,0,0):
gl = _eq_chempot(0,0,temp,dliq)
gs = _sal_g(0,0,0,salt,temp,pres,useext=useext)
gi = _ice_g(0,0,temp,pres)
g = seaf*(gl + gs) + (1-seaf)*gi
return g
elif (drvs,drvt,drvp) == (1,0,0):
gs_s = _sal_g(1,0,0,salt,temp,pres,useext=useext)
g_s = gs_s
return g_s
elif (drvs,drvt,drvp) == (0,1,0):
fl_t = _flu_f(1,0,temp,dliq)
gs_t = _sal_g(0,1,0,salt,temp,pres,useext=useext)
gi_t = _ice_g(1,0,temp,pres)
g_t = seaf*(fl_t + gs_t) + (1-seaf)*gi_t
return g_t
elif (drvs,drvt,drvp) == (0,0,1):
gs_p = _sal_g(0,0,1,salt,temp,pres,useext=useext)
gi_p = _ice_g(0,1,temp,pres)
g_p = seaf*(dliq**(-1) + gs_p) + (1-seaf)*gi_p
return g_p
elif (drvs,drvt,drvp) == (2,0,0):
g_ss = 0.0
return g_ss
elif (drvs,drvt,drvp) == (1,1,0):
fl_t = _flu_f(1,0,temp,dliq)
gs_t = _sal_g(0,1,0,salt,temp,pres,useext=useext)
gi_t = _ice_g(1,0,temp,pres)
g_st = (fl_t + gs_t - gi_t)/salt
return g_st
elif (drvs,drvt,drvp) == (1,0,1):
gs_p = _sal_g(0,0,1,salt,temp,pres,useext=useext)
gi_p = _ice_g(0,1,temp,pres)
g_sp = (dliq**(-1) + gs_p - gi_p)/salt
return g_sp
# Other derivatives require inversion
cl = _eq_pressure(0,1,temp,dliq)
gs_ss = _sal_g(2,0,0,salt,temp,pres,useext=useext)
if drvt > 0:
fl_t = _flu_f(1,0,temp,dliq)
gs_t = _sal_g(0,1,0,salt,temp,pres,useext=useext)
gs_st = _sal_g(1,1,0,salt,temp,pres,useext=useext)
gi_t = _ice_g(1,0,temp,pres)
dentr = fl_t + gs_t - salt*gs_st - gi_t
if drvp > 0:
gs_p = _sal_g(0,0,1,salt,temp,pres,useext=useext)
gs_sp = _sal_g(1,0,1,salt,temp,pres,useext=useext)
gi_p = _ice_g(0,1,temp,pres)
dvol = dliq**(-1) + gs_p - salt*gs_sp - gi_p
s_p = dvol / (salt*gs_ss)
dl_p = cl**(-1)
if (drvs,drvt,drvp) == (0,2,0):
fl_tt = _flu_f(2,0,temp,dliq)
fl_td = _flu_f(1,1,temp,dliq)
gs_tt = _sal_g(0,2,0,salt,temp,pres,useext=useext)
gi_tt = _ice_g(2,0,temp,pres)
s_t = dentr / (salt*gs_ss)
dl_t = -dliq**2*fl_td/cl
gb_tt = fl_tt + fl_td*dl_t + gs_tt
g_tt = -seaf/salt*dentr*s_t + seaf*gb_tt + (1-seaf)*gi_tt
return g_tt
elif (drvs,drvt,drvp) == (0,1,1):
fl_td = _flu_f(1,1,temp,dliq)
gs_tp = _sal_g(0,1,1,salt,temp,pres,useext=useext)
gi_tp = _ice_g(1,1,temp,pres)
gb_tp = fl_td*dl_p + gs_tp
g_tp = -seaf/salt*dentr*s_p + seaf*gb_tp + (1-seaf)*gi_tp
return g_tp
elif (drvs,drvt,drvp) == (0,0,2):
gs_pp = _sal_g(0,0,2,salt,temp,pres,useext=useext)
gi_pp = _ice_g(0,2,temp,pres)
gb_pp = -dl_p/dliq**2 + gs_pp
g_pp = -seaf/salt*dvol*s_p + seaf*gb_pp + (1-seaf)*gi_pp
return g_pp
# Should not have made it this far!
errmsg = 'Derivatives {0} not recognized'.format((drvs,drvt,drvp))
raise ValueError(errmsg)
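# The property functions below are thin wrappers around seaice_g; writing
# g_* for the corresponding derivative, they use the standard relations
#   entropy s = -g_t,  density rho = 1/g_p,  heat capacity cp = -temp*g_tt,
#   expansion alpha = g_tp/g_p,  isothermal compressibility kappa_t = -g_pp/g_p.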
def brinefraction(sisal,temp,pres,salt=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,dliq0=None,chkbnd=False,useext=False,
mathargs=None):
"""Calculate sea-ice brine fraction.
Calculate the mass fraction of seawater (brine) in a sea-ice parcel,
the ratio of the mass of seawater (salt + liquid water) to the total
mass (salt + liquid water + ice).
:arg float sisal: Total sea-ice salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Brine fraction in kg/kg.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> brinefraction(0.035,270.,1e5)
0.6247053284
"""
salt, dliq = eq_seaice(sisal,temp,pres,salt=salt,dliq=dliq,chkvals=chkvals,
chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,useext=useext,
mathargs=mathargs)
seaf = sisal/salt
return seaf
def cp(sisal,temp,pres,salt=None,dliq=None,chkvals=False,chktol=_CHKTOL,
salt0=None,dliq0=None,chkbnd=False,useext=False,mathargs=None):
"""Calculate sea-ice isobaric heat capacity.
Calculate the isobaric heat capacity of sea-ice.
:arg float sisal: Total sea-ice salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Heat capacity in J/kg/K.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> cp(0.035,270.,1e5)
62868.90151
"""
g_tt = seaice_g(0,2,0,sisal,temp,pres,salt=salt,dliq=dliq,chkvals=chkvals,
chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,useext=useext,
mathargs=mathargs)
cp = -temp * g_tt
return cp
def density(sisal,temp,pres,salt=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,dliq0=None,chkbnd=False,useext=False,
mathargs=None):
"""Calculate sea-ice total density.
Calculate the total density of a sea-ice parcel.
:arg float sisal: Total sea-ice salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Density in kg/m3.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> density(0.035,270.,1e5)
993.156434117
"""
g_p = seaice_g(0,0,1,sisal,temp,pres,salt=salt,dliq=dliq,chkvals=chkvals,
chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,useext=useext,
mathargs=mathargs)
rho = g_p**(-1)
return rho
def enthalpy(sisal,temp,pres,salt=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,dliq0=None,chkbnd=False,useext=False,
mathargs=None):
"""Calculate sea-ice enthalpy.
Calculate the specific enthalpy of a sea-ice parcel.
:arg float sisal: Total sea-ice salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Enthalpy in J/kg.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> enthalpy(0.035,270.,1e5)
-135534.287504
"""
salt, dliq = eq_seaice(sisal,temp,pres,salt=salt,dliq=dliq,chkvals=chkvals,
chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,useext=useext,
mathargs=mathargs)
g = seaice_g(0,0,0,sisal,temp,pres,salt=salt,dliq=dliq,useext=useext)
g_t = seaice_g(0,1,0,sisal,temp,pres,salt=salt,dliq=dliq,useext=useext)
h = g - temp*g_t
return h
def entropy(sisal,temp,pres,salt=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,dliq0=None,chkbnd=False,useext=False,
mathargs=None):
"""Calculate sea-ice entropy.
Calculate the specific entropy of a sea-ice parcel.
:arg float sisal: Total sea-ice salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Entropy in J/kg/K.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> entropy(0.035,270.,1e5)
-500.445444181
"""
g_t = seaice_g(0,1,0,sisal,temp,pres,salt=salt,dliq=dliq,chkvals=chkvals,
chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,useext=useext,
mathargs=mathargs)
s = -g_t
return s
def expansion(sisal,temp,pres,salt=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,dliq0=None,chkbnd=False,useext=False,
mathargs=None):
"""Calculate sea-ice thermal expansion coefficient.
Calculate the thermal expansion coefficient of a sea-ice parcel.
:arg float sisal: Total sea-ice salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Expansion coefficient in 1/K.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> expansion(0.035,270.,1e5)
-1.647313287e-02
"""
salt, dliq = eq_seaice(sisal,temp,pres,salt=salt,dliq=dliq,chkvals=chkvals,
chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,useext=useext,
mathargs=mathargs)
g_p = seaice_g(0,0,1,sisal,temp,pres,salt=salt,dliq=dliq,useext=useext)
g_tp = seaice_g(0,1,1,sisal,temp,pres,salt=salt,dliq=dliq,useext=useext)
alpha = g_tp / g_p
return alpha
def kappa_t(sisal,temp,pres,salt=None,dliq=None,chkvals=False,
chktol=_CHKTOL,salt0=None,dliq0=None,chkbnd=False,useext=False,
mathargs=None):
"""Calculate sea-ice isothermal compressibility.
Calculate the isothermal compressibility of a sea-ice parcel.
:arg float sisal: Total sea-ice salinity in kg/kg.
:arg float temp: Temperature in K.
:arg float pres: Pressure in Pa.
:arg salt: Seawater salinity in kg/kg. If unknown, pass None
(default) and it will be calculated.
:type salt: float or None
:arg dliq: Seawater liquid water density in kg/m3. If unknown, pass
None (default) and it will be calculated.
:type dliq: float or None
:arg bool chkvals: If True (default False) and all values are given,
this function will calculate the disequilibrium and raise a
warning if the results are not within a given tolerance.
:arg float chktol: Tolerance to use when checking values (default
_CHKTOL).
:arg salt0: Initial guess for the seawater salinity in kg/kg. If
None (default) then `_approx_tp` is used.
:type salt0: float or None
:arg dliq0: Initial guess for the seawater liquid water density in
kg/m3. If None (default) then `flu3a._dliq_default` is used.
:type dliq0: float or None
:arg bool chkbnd: If True then warnings are raised when the given
values are valid but outside the recommended bounds (default
False).
:arg bool useext: If False (default) then the salt contribution is
calculated from _GSCOEFFS; if True, from _GSCOEFFS_EXT.
:arg mathargs: Keyword arguments to the root-finder
:func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None
(default) then no arguments are passed and default parameters
will be used.
:returns: Compressibility in 1/Pa.
:raises RuntimeWarning: If the relative disequilibrium is more than
chktol, if chkvals is True and all values are given.
:raises RuntimeWarning: If the equilibrium seawater salinity is
lower than the total parcel salinity.
:Examples:
>>> kappa_t(0.035,270.,1e5)
1.56513441348e-9
"""
salt, dliq = eq_seaice(sisal,temp,pres,salt=salt,dliq=dliq,chkvals=chkvals,
chktol=chktol,salt0=salt0,dliq0=dliq0,chkbnd=chkbnd,useext=useext,
mathargs=mathargs)
g_p = seaice_g(0,0,1,sisal,temp,pres,salt=salt,dliq=dliq,useext=useext)
g_pp = seaice_g(0,0,2,sisal,temp,pres,salt=salt,dliq=dliq,useext=useext)
kappa = -g_pp / g_p
return kappa
|
from __future__ import annotations
import os
import re
import shutil
import stat
import tempfile
from collections.abc import Mapping
from contextlib import contextmanager
from pathlib import Path
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import Iterator
if TYPE_CHECKING:
from poetry.core.packages.package import Package
from requests import Session
from poetry.config.config import Config
_canonicalize_regex = re.compile("[-_]+")
def canonicalize_name(name: str) -> str:
return _canonicalize_regex.sub("-", name).lower()
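# e.g. canonicalize_name("My_Package--Utils") == "my-package-utils"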
def module_name(name: str) -> str:
return canonicalize_name(name).replace(".", "_").replace("-", "_")
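# e.g. module_name("my-package.name") == "my_package_name"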
def _del_ro(action: Callable, name: str, exc: Exception) -> None:
os.chmod(name, stat.S_IWRITE)
os.remove(name)
@contextmanager
def temporary_directory(*args: Any, **kwargs: Any) -> Iterator[str]:
name = tempfile.mkdtemp(*args, **kwargs)
yield name
shutil.rmtree(name, onerror=_del_ro)
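# Usage sketch: the directory is removed when the context exits; _del_ro makes
# read-only entries writable and retries the delete (mainly needed on Windows).
#   with temporary_directory(prefix="poetry-") as tmp_dir:
#       ...  # work inside tmp_dir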
def get_cert(config: Config, repository_name: str) -> Path | None:
cert = config.get(f"certificates.{repository_name}.cert")
if cert:
return Path(cert)
else:
return None
def get_client_cert(config: Config, repository_name: str) -> Path | None:
client_cert = config.get(f"certificates.{repository_name}.client-cert")
if client_cert:
return Path(client_cert)
else:
return None
def _on_rm_error(func: Callable, path: str, exc_info: Exception) -> None:
if not os.path.exists(path):
return
os.chmod(path, stat.S_IWRITE)
func(path)
def safe_rmtree(path: str) -> None:
if Path(path).is_symlink():
return os.unlink(str(path))
shutil.rmtree(path, onerror=_on_rm_error)
def merge_dicts(d1: dict, d2: dict) -> None:
for k in d2.keys():
if k in d1 and isinstance(d1[k], dict) and isinstance(d2[k], Mapping):
merge_dicts(d1[k], d2[k])
else:
d1[k] = d2[k]
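# merge_dicts mutates d1 in place and merges nested mappings, e.g.
#   d1 = {"a": {"x": 1}, "b": 1}
#   d2 = {"a": {"y": 2}, "b": 2}
#   merge_dicts(d1, d2)  # d1 == {"a": {"x": 1, "y": 2}, "b": 2}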
def download_file(
url: str,
dest: str,
session: Session | None = None,
chunk_size: int = 1024,
) -> None:
import requests
get = requests.get if not session else session.get
response = get(url, stream=True)
response.raise_for_status()
with open(dest, "wb") as f:
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk:
f.write(chunk)
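# Usage sketch (hypothetical URL and destination, shown for illustration only):
#   download_file("https://example.com/pkg-1.0-py3-none-any.whl",
#                 "/tmp/pkg-1.0-py3-none-any.whl")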
def get_package_version_display_string(
package: Package, root: Path | None = None
) -> str:
if package.source_type in ["file", "directory"] and root:
path = Path(os.path.relpath(package.source_url, root.as_posix())).as_posix()
return f"{package.version} {path}"
return package.full_pretty_version
def paths_csv(paths: list[Path]) -> str:
return ", ".join(f'"{c!s}"' for c in paths)
def is_dir_writable(path: Path, create: bool = False) -> bool:
try:
if not path.exists():
if not create:
return False
path.mkdir(parents=True, exist_ok=True)
with tempfile.TemporaryFile(dir=str(path)):
pass
except OSError:
return False
else:
return True
def pluralize(count: int, word: str = "") -> str:
if count == 1:
return word
return word + "s"
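# e.g. pluralize(1, "file") == "file", pluralize(3, "file") == "files"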
|
# Variational Bayes for binary logistic regression using JJ bound and mean field Gaussian-Gamma.
# Also, Laplace approx with EB for multiclass.
# Written by Amazasp Shaumyan
#https://github.com/AmazaspShumik/sklearn-bayes/blob/master/skbayes/linear_models/bayes_logistic.py
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
from sklearn.utils.optimize import newton_cg
from scipy.special import expit, exprel
from scipy.linalg import eigvalsh
from sklearn.utils.multiclass import check_classification_targets
from sklearn.linear_model.base import LinearClassifierMixin, BaseEstimator
from sklearn.utils import check_X_y
from scipy.linalg import solve_triangular
from sklearn.linear_model.logistic import ( _logistic_loss_and_grad, _logistic_loss,
_logistic_grad_hess,)
class BayesianLogisticRegression(LinearClassifierMixin, BaseEstimator):
'''
Superclass for two different implementations of Bayesian Logistic Regression
'''
def __init__(self, n_iter, tol, fit_intercept, verbose):
self.n_iter = n_iter
self.tol = tol
self.fit_intercept = fit_intercept
self.verbose = verbose
def fit(self,X,y):
'''
Fits Bayesian Logistic Regression
Parameters
-----------
X: array-like of size (n_samples, n_features)
Training data, matrix of explanatory variables
y: array-like of size (n_samples, )
Target values
Returns
-------
self: object
self
'''
# preprocess data
X,y = check_X_y( X, y , dtype = np.float64)
check_classification_targets(y)
self.classes_ = np.unique(y)
n_classes = len(self.classes_)
# prepare for ovr if required
n_samples, n_features = X.shape
if self.fit_intercept:
X = self._add_intercept(X)
if n_classes < 2:
raise ValueError("Need samples of at least 2 classes")
if n_classes > 2:
self.coef_, self.sigma_ = [0]*n_classes,[0]*n_classes
self.intercept_ = [0]*n_classes
else:
self.coef_, self.sigma_, self.intercept_ = [0],[0],[0]
# make classifier for each class (one-vs-the rest)
for i in range(len(self.coef_)):
if n_classes == 2:
pos_class = self.classes_[1]
else:
pos_class = self.classes_[i]
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = self._mask_val
coef_, sigma_ = self._fit(X,y_bin)
if self.fit_intercept:
self.intercept_[i],self.coef_[i] = self._get_intercept(coef_)
else:
self.coef_[i] = coef_
self.sigma_[i] = sigma_
self.coef_ = np.asarray(self.coef_)
return self
def predict_proba(self,X):
'''
Predicts probabilities of targets for test set
Parameters
----------
X: array-like of size [n_samples_test,n_features]
Matrix of explanatory variables (test set)
Returns
-------
probs: numpy array of size [n_samples_test]
Estimated probabilities of target classes
'''
# construct separating hyperplane
scores = self.decision_function(X)
if self.fit_intercept:
X = self._add_intercept(X)
# probit approximation to predictive distribution
sigma = self._get_sigma(X)
ks = 1. / ( 1. + np.pi*sigma / 8)**0.5
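        # MacKay's probit-style approximation of the logistic-Gaussian integral:
        # E[sigmoid(a)] ~= sigmoid(kappa * mu) with kappa = 1/sqrt(1 + pi*sigma/8)
        # (Bishop 2006, Sec. 4.5.2)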
probs = expit(scores.T*ks).T
# handle several class cases
if probs.shape[1] == 1:
probs = np.hstack([1 - probs, probs])
else:
probs /= np.reshape(np.sum(probs, axis = 1), (probs.shape[0],1))
return probs
def _add_intercept(self,X):
'''Adds intercept to data matrix'''
raise NotImplementedError
def _get_intercept(self,coef):
'''
Extracts value of intercept from coefficients
'''
raise NotImplementedError
def _get_sigma(self,X):
'''
Computes variance of predictive distribution (which is then used in
probit approximation of sigmoid)
'''
raise NotImplementedError
class EBLogisticRegression(BayesianLogisticRegression):
'''
Implements Bayesian Logistic Regression with type II maximum likelihood
(sometimes it is called Empirical Bayes), uses Gaussian (Laplace) method
for approximation of evidence function.
Parameters
----------
n_iter: int, optional (DEFAULT = 50)
Maximum number of iterations before termination
tol: float, optional (DEFAULT = 1e-3)
       If the absolute change in the precision parameter for the weights is
       below this threshold, the algorithm terminates.
solver: str, optional (DEFAULT = 'lbfgs_b')
Optimization method that is used for finding parameters of posterior
distribution ['lbfgs_b','newton_cg']
n_iter_solver: int, optional (DEFAULT = 15)
Maximum number of iterations before termination of solver
tol_solver: float, optional (DEFAULT = 1e-3)
       Convergence threshold for the solver (it is used when estimating the
       posterior distribution).
fit_intercept : bool, optional ( DEFAULT = True )
If True will use intercept in the model. If set
to false, no intercept will be used in calculations
alpha: float (DEFAULT = 1e-6)
Initial regularization parameter (precision of prior distribution)
verbose : boolean, optional (DEFAULT = True)
Verbose mode when fitting the model
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of posterior distribution)
sigma_ : array, shape = (n_features, )
eigenvalues of covariance matrix
alpha_: float
Precision parameter of weight distribution
intercept_: array, shape = (n_features)
intercept
References:
-----------
[1] Pattern Recognition and Machine Learning, Bishop (2006) (pages 293 - 294)
'''
def __init__(self, n_iter = 50, tol = 1e-3,solver = 'lbfgs_b',n_iter_solver = 15,
tol_solver = 1e-3, fit_intercept = True, alpha = 1e-6, verbose = False):
super(EBLogisticRegression,self).__init__(n_iter, tol, fit_intercept, verbose)
self.n_iter_solver = n_iter_solver
self.tol_solver = tol_solver
self.alpha = alpha
if solver not in ['lbfgs_b','newton_cg']:
raise ValueError(('Only "lbfgs_b" and "newton_cg" '
'solvers are implemented'))
self.solver = solver
# masking value (this is set for use in lbfgs_b and newton_cg)
self._mask_val = -1.
def _fit(self,X,y):
'''
Maximizes evidence function (type II maximum likelihood)
'''
# iterative evidence maximization
alpha = self.alpha
n_samples,n_features = X.shape
w0 = np.zeros(n_features)
for i in range(self.n_iter):
alpha0 = alpha
# find mean & covariance of Laplace approximation to posterior
w, d = self._posterior(X, y, alpha, w0)
mu_sq = np.sum(w**2)
            # iterative update of the precision hyperparameter; note that for
            # Bayesian logistic regression the Gull-MacKay fixed-point update
            # and the Expectation-Maximization update give the same rule
alpha = X.shape[1] / (mu_sq + np.sum(d))
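            # i.e. alpha_new = D / (||w||^2 + tr(Sigma)), where d holds the
            # eigenvalues of the Laplace posterior covariance from _posterior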
# check convergence
delta_alpha = abs(alpha - alpha0)
if delta_alpha < self.tol or i==self.n_iter-1:
break
# after convergence we need to find updated MAP vector of parameters
# and covariance matrix of Laplace approximation
coef_, sigma_ = self._posterior(X, y, alpha , w)
self.alpha_ = alpha
return coef_, sigma_
def _add_intercept(self,X):
'''
        Adds an intercept column to the data matrix (the column is not used by
        lbfgs_b or newton_cg; it is only used when forming the Hessian)
'''
return np.hstack((X,np.ones([X.shape[0],1])))
def _get_intercept(self,coef):
'''
Returns intercept and coefficients
'''
return coef[-1], coef[:-1]
def _get_sigma(self,X):
''' Compute variance of predictive distribution'''
return np.asarray([ np.sum(X**2*s,axis = 1) for s in self.sigma_])
def _posterior(self, X, Y, alpha0, w0):
'''
Iteratively refitted least squares method using l_bfgs_b or newton_cg.
Finds MAP estimates for weights and Hessian at convergence point
'''
n_samples,n_features = X.shape
if self.solver == 'lbfgs_b':
f = lambda w: _logistic_loss_and_grad(w,X[:,:-1],Y,alpha0)
w = fmin_l_bfgs_b(f, x0 = w0, pgtol = self.tol_solver,
maxiter = self.n_iter_solver)[0]
elif self.solver == 'newton_cg':
f = _logistic_loss
grad = lambda w,*args: _logistic_loss_and_grad(w,*args)[1]
hess = _logistic_grad_hess
args = (X[:,:-1],Y,alpha0)
            w = newton_cg(hess, f, grad, w0, args=args,
                          maxiter=self.n_iter_solver, tol=self.tol_solver)[0]
else:
raise NotImplementedError('Liblinear solver is not yet implemented')
# calculate negative of Hessian at w
xw = np.dot(X,w)
s = expit(xw)
R = s * (1 - s)
Hess = np.dot(X.T*R,X)
Alpha = np.ones(n_features)*alpha0
if self.fit_intercept:
Alpha[-1] = np.finfo(np.float16).eps
np.fill_diagonal(Hess, np.diag(Hess) + Alpha)
e = eigvalsh(Hess)
return w,1./e
#============== VB Logistic Regression (with Jaakkola-Jordan bound) ==================
def lam(eps):
    ''' Calculates lambda(eps) (used for the Jaakkola & Jordan local bound) '''
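    # lam(eps) = tanh(|eps|/2) / (4*|eps|), with limit 1/8 as eps -> 0;
    # the exprel form with a negative argument avoids overflow for large |eps|
    # and the 0/0 indeterminacy at eps = 0.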
eps = -abs(eps)
return 0.25 * exprel(eps) / (np.exp(eps) + 1)
class VBLogisticRegression(BayesianLogisticRegression):
'''
Variational Bayesian Logistic Regression with local variational approximation.
Parameters:
-----------
n_iter: int, optional (DEFAULT = 50 )
Maximum number of iterations
tol: float, optional (DEFAULT = 1e-3)
       Convergence threshold; if the change in coefficients is less than the
       threshold, the algorithm terminates
    fit_intercept: bool, optional (DEFAULT = True)
If True uses bias term in model fitting
    a: float, optional (DEFAULT = 1e-4)
       Shape parameter of the Gamma prior on the precision of the coefficients
    b: float, optional (DEFAULT = 1e-4)
       Rate parameter of the Gamma prior on the precision of the coefficients
verbose: bool, optional (DEFAULT = False)
Verbose mode
Attributes
----------
coef_ : array, shape = (n_features)
Coefficients of the regression model (mean of posterior distribution)
sigma_ : array, shape = (n_features, n_features)
estimated covariance matrix of the weights, computed only
for non-zero coefficients
intercept_: array, shape = (n_features)
intercepts
References:
-----------
[1] Bishop 2006, Pattern Recognition and Machine Learning ( Chapter 10 )
[2] Murphy 2012, Machine Learning A Probabilistic Perspective ( Chapter 21 )
'''
def __init__(self, n_iter = 50, tol = 1e-3, fit_intercept = True,
a = 1e-4, b = 1e-4, verbose = True):
super(VBLogisticRegression,self).__init__(n_iter, tol, fit_intercept, verbose)
self.a = a
self.b = b
self._mask_val = 0.
def _fit(self,X,y):
'''
Fits single classifier for each class (for OVR framework)
'''
eps = 1
n_samples, n_features = X.shape
XY = np.dot( X.T , (y-0.5))
w0 = np.zeros(n_features)
# hyperparameters of q(alpha) (approximate distribution of precision
# parameter of weights)
a = self.a + 0.5 * n_features
b = self.b
for i in range(self.n_iter):
# In the E-step we update approximation of
# posterior distribution q(w,alpha) = q(w)*q(alpha)
# --------- update q(w) ------------------
l = lam(eps)
w,Ri = self._posterior_dist(X,l,a,b,XY)
# -------- update q(alpha) ---------------
if self.fit_intercept:
b = self.b + 0.5*(np.sum(w[1:]**2) + np.sum(Ri[1:,:]**2))
else:
b = self.b + 0.5*(np.sum(w**2) + np.sum(Ri**2))
# -------- update eps ------------
# In the M-step we update parameter eps which controls
# accuracy of local variational approximation to lower bound
XMX = np.dot(X,w)**2
XSX = np.sum( np.dot(X,Ri.T)**2, axis = 1)
eps = np.sqrt( XMX + XSX )
#print('iter {}'.format(i))
#print(a)
#print(b)
#print(w)
# convergence
if np.sum(abs(w-w0) > self.tol) == 0 or i==self.n_iter-1:
break
w0 = w
l = lam(eps)
coef_, sigma_ = self._posterior_dist(X,l,a,b,XY,True)
self.a = a
self.b = b
return coef_, sigma_
def _add_intercept(self,X):
'''Adds intercept to data matrix'''
return np.hstack((np.ones([X.shape[0],1]),X))
def _get_intercept(self, coef):
''' Returns intercept and coefficients '''
return coef[0], coef[1:]
def _get_sigma(self,X):
''' Compute variance of predictive distribution'''
return np.asarray([np.sum(np.dot(X,s)*X,axis = 1) for s in self.sigma_])
def _posterior_dist(self,X,l,a,b,XY,full_covar = False):
'''
        Finds the Gaussian approximation to the posterior of the coefficients
        using the local variational approximation of Jaakkola & Jordan
'''
sigma_inv = 2*np.dot(X.T*l,X)
alpha_vec = np.ones(X.shape[1])*float(a) / b
if self.fit_intercept:
alpha_vec[0] = np.finfo(np.float16).eps
np.fill_diagonal(sigma_inv, np.diag(sigma_inv) + alpha_vec)
R = np.linalg.cholesky(sigma_inv)
Z = solve_triangular(R,XY, lower = True)
mean = solve_triangular(R.T,Z,lower = False)
        # is there a scipy function that efficiently inverts a
        # lower triangular matrix?
Ri = solve_triangular(R,np.eye(X.shape[1]), lower = True)
if full_covar:
sigma = np.dot(Ri.T,Ri)
return mean, sigma
else:
return mean , Ri
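# Usage sketch (illustrative only; synthetic data, hyperparameters chosen ad hoc):
#   import numpy as np
#   X = np.random.randn(200, 3)
#   y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)
#   vb = VBLogisticRegression(n_iter=100, tol=1e-4, verbose=False).fit(X, y)
#   probs = vb.predict_proba(X)      # shape (200, 2), rows sum to 1
#   w, b = vb.coef_, vb.intercept_   # posterior mean weights and intercept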
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from pykg2vec.core.KGMeta import ModelMeta, InferenceMeta
class TuckER(ModelMeta, InferenceMeta):
""" `TuckER-Tensor Factorization for Knowledge Graph Completion`_
TuckER is a Tensor-factorization-based embedding technique based on
    the Tucker decomposition of a third-order binary tensor of triplets. Although
    fully expressive, the number of parameters in TuckER grows only linearly with
    the embedding dimension as the number of entities or relations in the
    knowledge graph increases.
Args:
config (object): Model configuration parameters.
Attributes:
config (object): Model configuration.
data_stats (object): ModelMeta object instance. It consists of the knowledge graph metadata.
model_name (str): Name of the model.
Examples:
>>> from pykg2vec.core.TuckER import TuckER
>>> from pykg2vec.utils.trainer import Trainer
>>> model = TuckER()
>>> trainer = Trainer(model=model, debug=False)
>>> trainer.build_model()
>>> trainer.train_model()
.. _TuckER-Tensor Factorization for Knowledge Graph Completion:
https://arxiv.org/pdf/1901.09590.pdf
"""
def __init__(self, config=None):
self.config = config
self.data_stats = self.config.kg_meta
self.model_name = 'TuckER'
self.def_layer()
def def_inputs(self):
"""Defines the inputs to the model.
Attributes:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
hr_t (Tensor): Tail tensor list for (h,r) pair.
rt_h (Tensor): Head tensor list for (r,t) pair.
test_h_batch (Tensor): Batch of head ids for testing.
test_r_batch (Tensor): Batch of relation ids for testing
test_t_batch (Tensor): Batch of tail ids for testing.
"""
self.h = tf.placeholder(tf.int32, [None])
self.r = tf.placeholder(tf.int32, [None])
self.t = tf.placeholder(tf.int32, [None])
self.hr_t = tf.placeholder(tf.float32, [None, self.data_stats.tot_entity])
self.rt_h = tf.placeholder(tf.float32, [None, self.data_stats.tot_entity])
self.test_h_batch = tf.placeholder(tf.int32, [None])
self.test_t_batch = tf.placeholder(tf.int32, [None])
self.test_r_batch = tf.placeholder(tf.int32, [None])
def def_parameters(self):
"""Defines the model parameters.
Attributes:
num_total_ent (int): Total number of entities.
num_total_rel (int): Total number of relations.
d2 (Tensor): Size of the latent dimension for relations.
            d1 (Tensor): Size of the latent dimension for entities.
parameter_list (list): List of Tensor parameters.
E (Tensor Variable): Lookup variable containing embedding of the entities.
R (Tensor Variable): Lookup variable containing embedding of the relations.
            W (Tensor Variable): Transformation matrix.
"""
num_total_ent = self.data_stats.tot_entity
num_total_rel = self.data_stats.tot_relation
self.d1 = self.config.ent_hidden_size
self.d2 = self.config.rel_hidden_size
with tf.name_scope("embedding"):
self.ent_embeddings = tf.get_variable(name="ent_embedding", shape=[num_total_ent, self.d1],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
self.rel_embeddings = tf.get_variable(name="rel_embedding", shape=[num_total_rel, self.d2],
initializer=tf.contrib.layers.xavier_initializer(uniform=False))
with tf.name_scope("W"):
self.W = tf.get_variable(name="W", shape=[self.d2, self.d1, self.d1],
initializer=tf.initializers.random_uniform(minval=-1, maxval=1))
self.parameter_list = [self.ent_embeddings, self.rel_embeddings, self.W]
def def_layer(self):
"""Defines the layers of the algorithm."""
self.inp_drop = tf.keras.layers.Dropout(rate=self.config.input_dropout)
self.hidden_dropout1 = tf.keras.layers.Dropout(rate=self.config.hidden_dropout1)
self.hidden_dropout2 = tf.keras.layers.Dropout(rate=self.config.hidden_dropout2)
self.bn0 = tf.keras.layers.BatchNormalization(trainable=True)
self.bn1 = tf.keras.layers.BatchNormalization(trainable=True)
def forward(self, e1, r):
"""Implementation of the layer.
Args:
            e1 (Tensor): Entity ids.
            r (Tensor): Relation ids.
Returns:
Tensors: Returns the activation values.
"""
norm_E = tf.nn.l2_normalize(self.ent_embeddings, axis=1)
norm_R = tf.nn.l2_normalize(self.rel_embeddings, axis=1)
e1 = tf.nn.embedding_lookup(norm_E, e1)
rel = tf.squeeze(tf.nn.embedding_lookup(norm_R, r))
e1 = self.bn0(e1)
e1 = self.inp_drop(e1)
e1 = tf.reshape(e1, [-1, 1, self.config.ent_hidden_size])
W_mat = tf.matmul(rel, tf.reshape(self.W, [self.d2, -1]))
W_mat = tf.reshape(W_mat, [-1, self.d1, self.d1])
W_mat = self.hidden_dropout1(W_mat)
x = tf.matmul(e1, W_mat)
x = tf.reshape(x, [-1, self.d1])
x = self.bn1(x)
x = self.hidden_dropout2(x)
x = tf.matmul(x, tf.transpose(norm_E))
return tf.nn.sigmoid(x)
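    # forward scores one (entity, relation) pair against every entity: the core
    # tensor W is contracted with the relation embedding, the result multiplies
    # the (head or tail) entity embedding, and the product with the transposed
    # entity matrix yields one logit per candidate entity before the sigmoid.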
def def_loss(self):
"""Defines the loss function for the algorithm."""
pred_tails = self.forward(self.h, self.r)
pred_heads = self.forward(self.t, self.r)
hr_t = self.hr_t * (1.0 - self.config.label_smoothing) + 1.0 / self.data_stats.tot_entity
rt_h = self.rt_h * (1.0 - self.config.label_smoothing) + 1.0 / self.data_stats.tot_entity
loss_tails = tf.reduce_mean(tf.keras.backend.binary_crossentropy(hr_t, pred_tails))
loss_heads = tf.reduce_mean(tf.keras.backend.binary_crossentropy(rt_h, pred_heads))
reg_losses = tf.nn.l2_loss(self.ent_embeddings) + tf.nn.l2_loss(self.rel_embeddings) + tf.nn.l2_loss(self.W)
self.loss = loss_heads + loss_tails + self.config.lmbda * reg_losses
def test_batch(self):
"""Function that performs batch testing for the algorithm.
Returns:
Tensors: Returns ranks of head and tail.
"""
pred_tails = self.forward(self.test_h_batch, self.test_r_batch)
pred_heads = self.forward(self.test_t_batch, self.test_r_batch)
_, head_rank = tf.nn.top_k(pred_tails, k=self.data_stats.tot_entity)
_, tail_rank = tf.nn.top_k(pred_heads, k=self.data_stats.tot_entity)
return head_rank, tail_rank
# Override
def dissimilarity(self, h, r, t):
"""Function to calculate dissimilarity measure in embedding space.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
axis (int): Determines the axis for reduction
Returns:
Tensors: Returns the dissimilarity measure.
"""
if self.config.L1_flag:
return tf.reduce_sum(tf.abs(h + r - t), axis=1) # L1 norm
else:
return tf.reduce_sum((h + r - t) ** 2, axis=1) # L2 norm
def embed(self, h, r, t):
"""Function to get the embedding value.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
Returns:
            Tensors: Returns the head, relation and tail embedding Tensors.
"""
emb_h = tf.nn.embedding_lookup(self.ent_embeddings, h)
emb_r = tf.nn.embedding_lookup(self.rel_embeddings, r)
emb_t = tf.nn.embedding_lookup(self.ent_embeddings, t)
return emb_h, emb_r, emb_t
def get_embed(self, h, r, t, sess=None):
"""Function to get the embedding value in numpy.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
Returns:
            Tensors: Returns the head, relation and tail embeddings as numpy arrays.
"""
emb_h, emb_r, emb_t = self.embed(h, r, t)
h, r, t = sess.run([emb_h, emb_r, emb_t])
return h, r, t
def get_proj_embed(self, h, r, t, sess):
"""Function to get the projected embedding value in numpy.
Args:
h (Tensor): Head entities ids.
r (Tensor): Relation ids of the triple.
t (Tensor): Tail entity ids of the triple.
"""
return self.get_embed(h, r, t, sess)
|