text stringlengths 8 6.05M |
|---|
import boto3
import json
# def get_route_details(region):
# client = boto3.client('route53', region_name=region)
# response = client.list_hosted_zones()
# response = response['HostedZones']
# ids = []
# for i in range(len(response)):
# temp = response[i]
# ids.append(temp['Id'])
# return ids
#
#
# def get_route_dets(region, req_id):
# client = boto3.client('route53', region_name=region)
# response = client.get_hosted_zone(
# Id=req_id
# )
# response = response['HostedZone']
# print("ID : {}".format(response['Id']))
# print("Name : {}".format(response['Name']))
def get_all_route_details():
    """Print (and return) every Route 53 hosted zone as a JSON document.

    Lists all hosted zones in the account, fetches each zone's full
    record via ``get_hosted_zone``, and dumps the result under the
    "Route 53" key.

    Returns:
        str: the pretty-printed JSON string (also printed to stdout).
    """
    conn = boto3.client('route53')
    # Collect the ids of every hosted zone in the account.
    zones = conn.list_hosted_zones()['HostedZones']
    route_ids = [zone['Id'] for zone in zones]
    # Fetch the full details of each hosted zone.
    route_info = [conn.get_hosted_zone(Id=zone_id)['HostedZone']
                  for zone_id in route_ids]
    # Serialise to JSON; default=str stringifies any non-serialisable
    # values (e.g. datetimes) instead of raising.
    json_final = json.dumps({"Route 53": route_info}, indent=4, default=str)
    print(json_final)
    return json_final
# value = get_route_details('ap-south-1')
# get_route_dets('ap-south-1', value[0])
# Entry point: dump every hosted zone in the account as JSON to stdout.
get_all_route_details()
|
from getSqlData import *
from encodeJson import *
def getData(latitude, longitude):
    """Fetch rows for the given coordinates and return them JSON-encoded."""
    rows = getSqlData(latitude, longitude)
    return encodeJson(rows)
|
# Print a quick arithmetic check, then write two small demo files.
# (Modernised from Python 2: print function, context managers, no
# statement-terminating semicolons.)
print(1 + 2)

# `with` guarantees the files are closed even if a write fails.
with open('test', 'w') as f:
    f.write('6')

with open('test2', 'w') as f:
    f.write('demo output')
|
# Read the first term and common difference ("razão") of an arithmetic
# progression (PA) and print a running sum over 10 generated terms.
s = 0
pa = int (input ('Digite o primeiro termo da PA '))  # first term of the PA
razao = int (input ('Digite a razão da PA '))  # common difference
razao1 = razao * 10
# NOTE(review): each iteration adds `razao` (not the current term) to the
# running total and prints it; `pa` only changes how many values the range
# yields. Confirm this matches the intended exercise before "fixing" it.
for c in range(pa,razao1+1,razao):
    s = razao + s
print(s)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pexpect
import sys
import os
import configparser
import re
from funcy import re_all, partial, lmap, re_find
from funcy import select, distinct, filter, re_test, lmapcat
from toolz import thread_last
# CLI prompt and pager markers used by the ZTE OLT command shell.
prompter = "#$"
pager = "--More--"
# Echo every telnet interaction to stdout for debugging.
logfile = sys.stdout
# OLT credentials are read from the user's ~/.weihu/config.ini.
config = configparser.ConfigParser()
config.read(os.path.expanduser('~/.weihu/config.ini'))
username = config.get('olt', 'zte_username')
password = config.get('olt', 'zte_password')
def telnet(ip):
    """Open a telnet session to the ZTE device at *ip* and log in.

    Returns the pexpect child positioned at the CLI prompt.  Raises
    pexpect.EOF / pexpect.TIMEOUT when the device is unreachable or the
    login dialogue does not match.
    """
    child = pexpect.spawn('telnet {0}'.format(ip), encoding='ISO-8859-1')
    child.logfile = logfile
    child.expect("[uU]sername:")
    child.sendline(username)
    child.expect("[pP]assword:")
    child.sendline(password)
    child.expect(prompter)
    return child
def do_some(child, cmd, timeout=120):
    """Run *cmd* on the CLI session and return its full (de-paged) output."""
    chunks = []
    child.sendline(cmd)
    while True:
        matched = child.expect([prompter, pager], timeout=timeout)
        chunks.append(child.before)
        if matched != 0:
            # Pager hit: press space for the next page and keep reading.
            child.send(' ')
            continue
        break
    output = ''.join(chunks)
    # Strip backspace characters and the echoed command line itself.
    return output.replace('\x08', '').replace(cmd + '\r\n', '', 1)
def close(child):
    """Background the running command (Ctrl-Z), exit the CLI, drop the link."""
    child.sendcontrol('z')
    child.expect(prompter)
    child.sendline('exit')
    child.close()
def get_svlan(ip):
    """Return [ip, port, svlan] rows for smart-qinq EPON ports on *ip*.

    Only rows whose second column (vlan) is in 51..1999 are kept.  On a
    connection failure a single [(ip, 'ZTE', 'fail')] marker is returned.
    """
    def _format(port):
        # Normalise 'xxx_1/2/3' style names to zero-padded '01/02/03'.
        temp = re_find(r'_(\d+)/(\d+)/(\d+)', port)
        temp = map(lambda x: x if len(x) > 1 else '0' + x, temp)
        return '/'.join(temp)
    try:
        child = telnet(ip)
        rslt1 = do_some(child, 'show vlan-smart-qinq')
        close(child)
    except (pexpect.EOF, pexpect.TIMEOUT):
        return [(ip, 'ZTE', 'fail')]
    # Split the table into lines, keep epon rows, then split into columns.
    rslt1 = re.split(r'\r\n\s*', rslt1)
    rslt1 = (re.split(r'\s+', x) for x in rslt1 if x.startswith('epon'))
    rslt1 = [[ip, _format(x[0]), x[5]] for x in rslt1
             if 51 <= int(x[1]) <= 1999]
    return rslt1
def get_pon_ports(ip):
    """Return ('success', [pon interface lines], ip) for [eg]pon-olt ports.

    Returns ('fail', None, ip) on any telnet error.
    """
    try:
        child = telnet(ip)
        rslt = do_some(child, 'show run | in interface [eg]pon-olt')
        child.sendline('exit')
        child.close()
        # Drop the trailing element left over after the final newline split.
        rslt1 = re.split(r'\r\n\s*', rslt)[:-1]
    except (pexpect.EOF, pexpect.TIMEOUT) as e:
        return ('fail', None, ip)
    return ('success', rslt1, ip)
def get_port_onus(child, port):
    """Return (port, [(onu_id, loid), ...]) for E8C ONUs on *port*."""
    rslt = do_some(child, 'show run {port}'.format(port=port))
    # Matches 'onu <n> type E8CP24|E8CG24 loid <16 hex chars>'.
    rslt1 = re_all(r'onu\s(\d+)\stype\sE8C[PG]24\sloid\s([A-F0-9]{16})', rslt)
    return (port, rslt1)
def get_onus(ip):
    """Collect the ONUs hanging off every PON port of the OLT at *ip*.

    Returns ('success', ports_with_onus, ip) where ports_with_onus is an
    iterable of (port, [(onu_id, loid), ...]) pairs with empty ports
    filtered out, or ('fail', None, ip) on any telnet error.
    """
    # get_pon_ports returns (mark, ports, ip); drop the trailing ip.
    mark, ports = get_pon_ports(ip)[:-1]
    if mark == 'fail':
        return ('fail', None, ip)
    try:
        child = telnet(ip)
        gpo = partial(get_port_onus, child)
        rslt = lmap(gpo, ports)
        child.sendline('exit')
        child.close()
    except (pexpect.EOF, pexpect.TIMEOUT):
        # (the exception object was previously bound to an unused name)
        return ('fail', None, ip)
    # Keep only ports that actually reported at least one ONU.
    rslt1 = filter(lambda x: bool(x[1]), rslt)
    return ('success', rslt1, ip)
def get_groups(ip):
    """Return ('success', smartgroup dicts, ip) describing LACP groups.

    Each dict carries name ('smartgroupN'), member interfaces, description
    and lacp mode.  Returns ('fail', None, ip) on telnet errors.
    """
    def _get_infs(record):
        # Parse one 'show lacp internal' paragraph into {name, infs}.
        name = re_find(r'(Smartgroup:\d+)', record)
        if name:
            name = name.lower().replace(':', '')
        infs = re_all(r'(x?gei_\d+/\d+/\d+)\s?selected', record)
        return dict(name=name, infs=infs)
    def _get_desc_mode(child, group):
        # Enrich a group dict (in place) with its description and lacp mode.
        rslt = do_some(child, 'show run int {name}'.format(name=group['name']))
        desc = re_find(r'description\s+(\S+)', rslt)
        group['desc'] = desc
        rslt = do_some(
            child, 'show run int {inf}'.format(inf=group['infs'][0]))
        mode = re_find(r'smartgroup\s\d+\smode\s(\S+)', rslt)
        group['mode'] = mode
        return group
    try:
        child = telnet(ip)
        rslt = re.split(r'\r\n\s*\r\n', do_some(child, 'show lacp internal'))
        groups = thread_last(rslt, (lmap, _get_infs),
                             (select, lambda x: x['name'] and x['infs']))
        lmap(partial(_get_desc_mode, child), groups)
        close(child)
    except (pexpect.EOF, pexpect.TIMEOUT) as e:
        return ('fail', None, ip)
    return ('success', groups, ip)
def get_infs(ip):
    """Return ('success', per-interface info dicts, ip) for [x]gei ports.

    Each dict holds name, description, link state, bandwidth (Mbit/s) and
    current in/out rates (Mbit/s).  ('fail', None, ip) on telnet errors.
    """
    def _get_info(child, inf):
        # Scrape one 'show int' screen into a dict.
        rslt = do_some(child, 'show int {inf}'.format(inf=inf))
        desc = re_find(r'Description\sis\s(\S+)', rslt)
        state = re_find(r'{inf}\sis\s(\S+\s?\S+),'.format(inf=inf), rslt)
        bw = re_find(r'BW\s(\d+)\sKbits', rslt)
        bw = int(bw or 0) / 1000
        inTraffic = re_find(r'seconds\sinput\srate\s?:\s+(\d+)\sBps', rslt)
        # bytes/s -> Mbit/s
        inTraffic = int(inTraffic or 0) * 8 / 1e6
        outTraffic = re_find(r'seconds\soutput\srate:\s+(\d+)\sBps', rslt)
        outTraffic = int(outTraffic or 0) * 8 / 1e6
        return dict(
            name=inf,
            desc=desc,
            state=state,
            bw=bw,
            inTraffic=inTraffic,
            outTraffic=outTraffic)
    try:
        child = telnet(ip)
        rslt = do_some(child, 'show run | in interface', timeout=180)
        rslt = re_all(r'interface\s+(x?gei_\d+/\d+/\d+)', rslt)
        infs = lmap(partial(_get_info, child), rslt)
        close(child)
    except (pexpect.EOF, pexpect.TIMEOUT) as e:
        return ('fail', None, ip)
    return ('success', infs, ip)
def get_main_card(ip):
    """Count INSERVICE/STANDBY SCXM or GCSA main cards on the device.

    Returns ('success', count, ip), or ('fail', None, ip) on telnet errors.
    """
    try:
        child = telnet(ip)
        rslt = do_some(child, 'show card')
        close(child)
        cards = re_all(
            r'\d\s+\d\s+\d{1,2}\s+(SCXM|GCSA).*(?:INSERVICE|STANDBY)', rslt)
    except (pexpect.EOF, pexpect.TIMEOUT) as e:
        return ('fail', None, ip)
    return ('success', len(cards), ip)
def get_power_info(ip):
    """Check the device's alarm pool for power alarms (codes 33054/53504).

    Returns ('success', 'alarm'|'normal', ip), or ('fail', None, ip) on
    telnet errors.
    """
    try:
        session = telnet(ip)
        alarm_text = do_some(session, 'show alarm pool')
        close(session)
    except (pexpect.EOF, pexpect.TIMEOUT):
        return ('fail', None, ip)
    found = re_find(r'Alarm Code\s+:\s+(33054|53504)', alarm_text)
    status = 'alarm' if found else 'normal'
    return ('success', status, ip)
def no_shut(ip, inf):
    """Issue 'no shutdown' on interface *inf* of the device at *ip*.

    Returns ('success', ip) or ('fail', ip).
    """
    try:
        child = telnet(ip)
        do_some(child, 'conf t')
        do_some(child, 'interface {inf}'.format(inf=inf))
        do_some(child, 'no shutdown')
        close(child)
    except (pexpect.EOF, pexpect.TIMEOUT):
        return ('fail', ip)
    return ('success', ip)
def get_inf(ip, inf):
    """Return ('success', link state, ip) for interface *inf*.

    Returns ('fail', None, ip) on telnet errors.
    """
    try:
        child = telnet(ip)
        rslt = do_some(child, 'show interface {inf}'.format(inf=inf))
        close(child)
    except (pexpect.EOF, pexpect.TIMEOUT):
        return ('fail', None, ip)
    # Captures up to two words so states like 'administratively down' work.
    state = re_find(r'is (\w+\s?\w+)', rslt)
    return ('success', state, ip)
def get_active_port(ip):
    """Return [[ip, 'successed', inf], ...] for every link-up [x]gei port.

    On any error a single [[ip, 'ZTE', 'failed']] marker row is returned.
    """
    def _get_active_port(child, inf):
        # Return the full 'interface xxx' line when the link is up, else ''.
        info = do_some(child, 'show {0}'.format(inf))
        if re_test(r'line\sprotocol\sis\sup', info):
            return inf
        else:
            return ''
    try:
        child = telnet(ip)
        rslt = do_some(child, 'show run | include interface', timeout=300)
        infs = [
            _get_active_port(child, inf) for inf in rslt.split('\r\n')
            # Bug fix: the (?i) global flag must be at the START of the
            # pattern — a mid-pattern position raises re.error on
            # Python 3.11+ (deprecated since 3.6).
            if re_test(r'(?i)interface (xg|g)ei', inf)
        ]
        close(child)
    except Exception:
        return [[ip, 'ZTE', 'failed']]
    infs = [x.split()[1] for x in infs if x]
    infs = [[ip, 'successed', x] for x in infs]
    return infs
|
# Prompt for a file name and echo its contents upper-cased, one line at
# a time, without trailing newlines.
fname = input("Enter file name: ")
try:
    f = open(fname)
except OSError:
    # Catch only file-open failures: a bare `except:` would also swallow
    # KeyboardInterrupt/SystemExit and hide real bugs.
    print("Entered file name is not available")
    quit()
# `with` guarantees the file is closed when we finish iterating.
with f:
    for l in f:
        l = l.rstrip()
        print(l.upper())
|
from django.contrib import admin
from django.apps import apps
from course.management.commands import refreshachievements
from .models import *
from .forms.forms import UserCreationForm, CaptchaPasswordResetForm
from django.forms import BaseInlineFormSet, ModelForm
from django.forms.widgets import TextInput
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django.contrib import messages
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
from django.utils.translation import ungettext
import markdown2
from django.utils.formats import date_format
def invite_user(modeladmin, request, queryset):
    """Admin action: email a password-creation link to the selected users.

    Validates first: every selected user must have an email address and
    must NOT already have a usable password; otherwise the action aborts
    with an error message and sends nothing.
    """
    for user in queryset:
        if user.has_usable_password():
            messages.error(
                request,
                _("User %(username)s already has a password!") % {'username': user.username}
            )
            return
        elif user.email == '':
            messages.error(
                request,
                _("User %(username)s has no email!") % {'username': user.username }
            )
            return
    # All users validated: reuse Django's password-reset machinery to send
    # a "create your password" email to each of them.
    for user in queryset:
        reset_form = CaptchaPasswordResetForm({'email': user.email})
        reset_form.is_valid()
        reset_form.save(
            request=request,
            use_https=request.is_secure(),
            subject_template_name='email/password_creation_subject.txt',
            html_email_template_name='email/password_creation_email.html',
        )
    success_message = ungettext(
        "%(count)d email sent successfully!",
        "%(count)d emails sent successfully!",
        queryset.count()
    )% {
        'count': queryset.count(),
    }
    messages.success(request, success_message)
invite_user.short_description = _("Send invitation to selected users")
def has_usable_password(self):
    """list_display helper: whether the rendered User already has a password.

    *self* is the User row being displayed; the boolean is stringified and
    passed through gettext so the changelist cell can be translated.
    """
    return _( str( self.has_usable_password() ) )
has_usable_password.short_description = _('Has Password?')
def last_login_formatted(self):
    """list_display helper: the user's last login in the local timezone.

    Returns None (renders as empty) for users that never logged in.
    """
    # `is None` is the correct identity check (was `== None`).
    if self.last_login is None:
        return None
    # `timezone` comes from django.utils (see imports) — the original
    # referenced it without importing it, raising NameError at render time.
    return date_format(timezone.localtime(self.last_login), format='SHORT_DATETIME_FORMAT', use_l10n=True)
last_login_formatted.short_description = _('Last Login')
class UserAdmin(UserAdmin):
    """Customised User admin with invitation support.

    NOTE(review): deliberately shadows django.contrib.auth.admin.UserAdmin —
    the base class reference is bound before this class statement executes.
    """
    list_display = ('email', 'first_name', 'last_name', has_usable_password, last_login_formatted, 'date_joined')
    actions = (invite_user,)
    add_form = UserCreationForm
    list_filter = ('last_login', 'groups',)
    ordering = ('first_name', 'last_name')
    def save_model(self, request, obj, form, change):
        # New users created without a password get an unusable one, so the
        # "invite" action can later send them a password-creation email.
        if not change and (not form.cleaned_data['password1'] or not obj.has_usable_password()):
            obj.set_unusable_password()
        super(UserAdmin, self).save_model(request, obj, form, change)
# Swap Django's stock User admin for the customised one above.
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
class BasicAdmin(admin.ModelAdmin):
    """Base admin that injects the course app's shared CSS/JS assets."""
    class Media:
        css = { "all" : ("course/admin.css",) }
        js = ["course/admin.js"]
class CourseForm(ModelForm):
    """Form rendering the two theme colour fields with native colour pickers."""
    class Meta:
        # NOTE(review): this form is attached to CourseAdmin (Course model)
        # below, yet Meta.model is CourseClass — confirm which model actually
        # owns primary/secondary_hex_color.
        model = CourseClass
        fields = '__all__'
        widgets = {
            'primary_hex_color': TextInput(attrs={'type': 'color'}),
            'secondary_hex_color': TextInput(attrs={'type': 'color'}),
        }
class CourseAdmin(BasicAdmin):
    """Admin for Course, using the colour-picker form above."""
    form = CourseForm
    list_display = ('name', 'code', 'description')
    ordering = ('name',)
admin.site.register(Course, CourseAdmin)
def duplicate_course_class(modeladmin, request, queryset):
    """Admin action: deep-copy each selected course class.

    Copies the class itself (with a unique "code (n)" suffix), then its
    assignment tasks, and its class badges with their criteria.  Grades
    and achievements are NOT copied.
    """
    for course_class in queryset:
        # Re-fetch and null the pk so save() performs an INSERT.
        new_course_class = CourseClass.objects.get(pk=course_class.id)
        new_course_class.id = None
        # Duplicate course class, adding a (number) on the code
        copy_number = 2
        while True:
            new_code = "%s (%d)" % (course_class.code, copy_number)
            if CourseClass.objects.filter(code=new_code).first() == None:
                new_course_class.code = new_code
                break
            else:
                copy_number += 1
        new_course_class.save()
        # Duplicate assignment tasks from original course class
        existing_assignment_tasks = AssignmentTask.objects.filter(course_class=course_class).order_by('id')
        for existing_assignment_task in existing_assignment_tasks:
            # Nulling the pk on the fetched instance makes save() clone it.
            new_assignment_task = existing_assignment_task
            new_assignment_task.id = None
            new_assignment_task.course_class = new_course_class
            new_assignment_task.save()
        # Duplicate badges tasks from original course class
        class_badges = ClassBadge.objects.filter(course_class=course_class).order_by('id')
        for class_badge in class_badges:
            criteria_list = ClassBadgeCriteria.objects.filter(class_badge=class_badge).order_by('id') # this line must come before we change the id in class_badge
            class_badge.id = None
            class_badge.course_class = new_course_class
            class_badge.save()
            for criteria in criteria_list:
                criteria.id = None
                criteria.class_badge = class_badge
                criteria.save()
duplicate_course_class.short_description = _("Duplicate course class")
def refresh_achievements(modeladmin, request, queryset):
    # Admin action: recompute achievements for the selected course classes
    # by delegating to the management command's helper.
    refreshachievements.refresh_achievements(queryset)
refresh_achievements.short_description = _("Refresh achievements")
class CourseClassAdmin(BasicAdmin):
    """Admin for CourseClass with duplicate/refresh bulk actions."""
    list_display = ('code', 'course', 'start_date', 'end_date')
    ordering = ('-start_date', 'course', 'code')
    actions = (duplicate_course_class, refresh_achievements)
admin.site.register(CourseClass, CourseClassAdmin)
class TaskAdmin(BasicAdmin):
    """Admin for Task, grouped by course."""
    list_display = ('name', 'course', 'description')
    list_filter = ('course',)
    ordering = ('course', 'name',)
admin.site.register(Task, TaskAdmin)
class AssignmentTaskInline(admin.TabularInline):
    # Inline editing of an assignment's tasks, oldest first.
    model = AssignmentTask
    extra = 1
    ordering = ('id',)
class AssignmentAdmin(BasicAdmin):
    """Admin for Assignment with inline task editing."""
    inlines = [AssignmentTaskInline]
    list_display = ('name', 'course', 'description')
    list_filter = ('course',)
    ordering = ('name',)
admin.site.register(Assignment, AssignmentAdmin)
class GradeInlineFormSet(BaseInlineFormSet):
    """Inline formset that pre-creates one Grade row per enrolled student.

    Blank rows are seeded with the enrollments (ordered by student name)
    that do not yet have a Grade for this assignment task.
    """
    model = Grade
    # Cached enrollment ids for this task's course class.
    _enrollment_ids = None
    @property
    def enrollment_ids(self):
        # No rows until the parent assignment task has been saved.
        if self.instance.assignment_id == None:
            return []
        if not self._enrollment_ids:
            self._enrollment_ids = list(Enrollment.objects.filter(
                course_class = self.instance.course_class
            ).order_by(
                'student__full_name'
            ).values_list('id', flat=True))
        return self._enrollment_ids
    def total_form_count(self):
        # One form per enrollment; none at all on the "add" page.
        return len(self.enrollment_ids) if self.instance.id != None else 0
    def __init__(self, *args, **kwargs):
        super(GradeInlineFormSet, self).__init__(*args, **kwargs)
        enrollment_ids = list(self.enrollment_ids) # make a copy of the list
        index = 0
        for form in self:
            if form.instance.id != None:
                # Existing grade: its enrollment no longer needs a blank row.
                if form.instance.enrollment.id in enrollment_ids:
                    enrollment_ids.remove(form.instance.enrollment.id)
            else:
                # Blank row: pre-select the next enrollment without a grade.
                form.initial['enrollment'] = enrollment_ids[index]
                form.initial['percentage'] = ""
                index += 1
class GradeInline(admin.TabularInline):
    """Grades for an assignment task, one row per enrolled student."""
    model = Grade
    ordering = ('enrollment__course_class__code', 'enrollment__student__full_name',)
    formset = GradeInlineFormSet
    raw_id_fields = ("enrollment",)
class AssignmentTaskAdmin(BasicAdmin):
    """Admin for AssignmentTask with its grade sheet inlined."""
    inlines = [GradeInline]
    list_display = ('__str__', 'points', 'course_class')
    list_filter = ('course_class',)
    ordering = ('-course_class', 'assignment_id', 'id',)
    def course(self, obj):
        # Convenience accessor: the course this task's assignment belongs to.
        return obj.assignment.course
admin.site.register(AssignmentTask, AssignmentTaskAdmin)
class EnrollmentInline(admin.TabularInline):
    # Inline list of a student's enrollments.
    model = Enrollment
    extra = 1
    ordering = ('id',)
class EnrollmentGradeInlineFormSet(BaseInlineFormSet):
    """Inline formset that pre-creates one Grade row per assignment task.

    Mirror image of GradeInlineFormSet: here the fixed axis is the
    enrollment, and blank rows are seeded with the class's tasks.
    """
    model = Grade
    # Cached assignment-task ids for this enrollment's course class.
    _assignment_tasks_ids = None
    @property
    def assignment_task_ids(self):
        # No rows until the parent enrollment has a course class.
        if self.instance.course_class_id == None:
            return []
        if not self._assignment_tasks_ids:
            self._assignment_tasks_ids = list(AssignmentTask.objects.filter(
                course_class = self.instance.course_class
            ).order_by(
                'assignment_id', 'id'
            ).values_list('id', flat=True))
        return self._assignment_tasks_ids
    def total_form_count(self):
        return len(self.assignment_task_ids) if self.instance.id != None else 0
    def __init__(self, *args, **kwargs):
        super(EnrollmentGradeInlineFormSet, self).__init__(*args, **kwargs)
        assignment_task_ids = list(self.assignment_task_ids) # make a copy of the list
        index = 0
        for form in self:
            if form.instance.id != None:
                # NOTE(review): unlike GradeInlineFormSet this remove() is not
                # guarded by a membership test — a grade whose task id is not
                # in the list raises ValueError; confirm that cannot occur.
                assignment_task_ids.remove(form.instance.assignment_task.id)
            else:
                form.initial['assignment_task'] = assignment_task_ids[index]
                form.initial['percentage'] = ""
                index += 1
class SimpleGradeInline(admin.TabularInline):
    """All of one enrollment's grades, ordered by assignment then task."""
    model = Grade
    raw_id_fields = ("assignment_task",)
    formset = EnrollmentGradeInlineFormSet
    ordering = ('assignment_task__assignment_id', 'assignment_task')
def last_login_formatted_for_enrolment(self):
    # list_display helper: *self* is the Enrollment row; delegate to the
    # user-level formatter on the enrolled student's auth user.
    return last_login_formatted(self.student.user)
last_login_formatted_for_enrolment.short_description = _('Last Login')
class EnrollmentAdmin(BasicAdmin):
    """Admin for Enrollment with an inline grade sheet per student."""
    inlines = [SimpleGradeInline]
    list_display = ('student', 'id_number', 'course_class', 'total_score', last_login_formatted_for_enrolment)
    list_filter = ('course_class',)
    ordering = ('-course_class__start_date', 'student__full_name')
    search_fields = ('student__full_name',)
    def id_number(self, obj):
        # list_display helper; parameter renamed from `object`, which
        # shadowed the builtin.
        return obj.student.id_number
admin.site.register(Enrollment, EnrollmentAdmin)
class StudentAdmin(BasicAdmin):
    """Admin for Student with inline enrollments."""
    inlines = [EnrollmentInline]
    list_display = ('full_name', 'id_number', 'enrollments')
    search_fields = ('full_name',)
    ordering = ('full_name',)
    raw_id_fields = ("user",)
admin.site.register(Student, StudentAdmin)
class ClassInstructorInline(admin.TabularInline):
    # Links an instructor to the classes they teach.
    model = ClassInstructor
    ordering = ('course_class_id',)
class InstructorAdmin(BasicAdmin):
    """Admin for Instructor with inline class assignments."""
    list_display = ('full_name',)
    search_fields = ('full_name',)
    ordering = ('full_name',)
    inlines = [ClassInstructorInline]
    raw_id_fields = ('user',)
admin.site.register(Instructor, InstructorAdmin)
class PostAdmin(BasicAdmin):
    """Admin for Post: renders markdown_text to html_code on every save."""
    model = Post
    list_display = ('course_class', 'title','post_datetime')
    ordering = ('-post_datetime',)
    # Bug fix: `read_only` is not a ModelAdmin option and was silently
    # ignored; `readonly_fields` is the attribute Django actually honours.
    readonly_fields = ('html_code',)
    list_filter = ('course_class',)
    def save_model(self, request, post, form, change):
        # Regenerate the cached HTML from the markdown source.
        post.html_code = markdown2.markdown(post.markdown_text, extras=["tables", "fenced-code-blocks"])
        super().save_model(request, post, form, change)
admin.site.register(Post, PostAdmin)
class WidgetAdmin(BasicAdmin):
    """Admin for Widget: renders markdown_text to html_code on every save."""
    model = Widget
    list_display = ('course_class', 'title', 'order')
    ordering = ('course_class','order')
    # Bug fix: `read_only` is not a ModelAdmin option and was silently
    # ignored; `readonly_fields` is the attribute Django actually honours.
    readonly_fields = ('html_code',)
    list_filter = ('course_class',)
    def save_model(self, request, post, form, change):
        # Regenerate the cached HTML from the markdown source.
        post.html_code = markdown2.markdown(post.markdown_text, extras=["tables", "fenced-code-blocks"])
        super().save_model(request, post, form, change)
admin.site.register(Widget, WidgetAdmin)
class BadgeAdmin(BasicAdmin):
    """Admin for Badge showing a small coloured thumbnail of the icon."""
    model = Badge
    list_display = ('name', 'thumbnail', 'course')
    ordering = ('course', 'name')
    def thumbnail(self, obj):
        # Round thumbnail tinted with the course's primary colour.
        return "<img src='%s' style='max-width: 30px; max-height: 30px; border-radius: 50%%; background-color: %s' />" % (obj.icon_url, obj.course.primary_hex_color)
    # NOTE(review): allow_tags was removed in Django 2.0; on modern Django
    # this HTML renders escaped unless switched to format_html — confirm
    # the project's Django version.
    thumbnail.allow_tags = True
    thumbnail.__name__ = 'Thumbnail'
admin.site.register(Badge, BadgeAdmin)
class AchievementInlineFormSet(BaseInlineFormSet):
    """Inline formset pre-creating one Achievement row per enrollment.

    Same pattern as GradeInlineFormSet, keyed on the class badge instead
    of the assignment task.
    """
    model = Achievement
    # Cached enrollment ids for this badge's course class.
    _enrollment_ids = None
    @property
    def enrollment_ids(self):
        # No rows until the parent class badge has been saved.
        if self.instance.badge_id == None:
            return []
        if not self._enrollment_ids:
            self._enrollment_ids = list(Enrollment.objects.filter(
                course_class = self.instance.course_class
            ).order_by(
                'student__full_name'
            ).values_list('id', flat=True))
        return self._enrollment_ids
    def total_form_count(self):
        return len(self.enrollment_ids) if self.instance.id != None else 0
    def __init__(self, *args, **kwargs):
        super(AchievementInlineFormSet, self).__init__(*args, **kwargs)
        enrollment_ids = list(self.enrollment_ids) # make a copy of the list
        index = 0
        for form in self:
            if form.instance.id != None:
                # Existing achievement: enrollment no longer needs a blank row.
                if form.instance.enrollment.id in enrollment_ids:
                    enrollment_ids.remove(form.instance.enrollment.id)
            else:
                # Blank row: pre-select the next enrollment without one.
                form.initial['enrollment'] = enrollment_ids[index]
                form.initial['percentage'] = ""
                index += 1
class AchievementInline(admin.TabularInline):
    """Achievements for a class badge, one row per enrolled student."""
    model = Achievement
    ordering = ('enrollment__student__full_name',)
    formset = AchievementInlineFormSet
    raw_id_fields = ("enrollment",)
class ClassBadgeCriteriaInline(admin.TabularInline):
    # Criteria that must be met to earn the badge.
    model = ClassBadgeCriteria
    extra = 1
    ordering = ('id',)
class ClassBadgeAdmin(BasicAdmin):
    """Admin for ClassBadge with inline criteria and achievements."""
    model = ClassBadge
    list_display = ('badge', 'description', 'course_class')
    ordering = ('course_class', 'id')
    inlines = [ClassBadgeCriteriaInline, AchievementInline]
    list_filter = ('course_class',)
admin.site.register(ClassBadge, ClassBadgeAdmin)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Modernised from Python 2: print statements converted to the print()
# function; output text is unchanged.
def cheese_and_cracker(cheese_count, boxes_of_crackers):
    """Print a little party report for the given amounts of food."""
    print("You have %d cheese!" % cheese_count)
    print("You have %d boxes of crackers!" % boxes_of_crackers)
    print("Man that's enough for a party!")
    print("Get a blanket.\n")

print("We can just give the function numbers directly:")
cheese_and_cracker(20, 30)
print("OR,we can use variables from our script:")
amount_of_cheese = 10
amount_of_crackers = 50
cheese_and_cracker(amount_of_cheese, amount_of_crackers)
print("We can even do math inside too:")
cheese_and_cracker(10 + 20, 5 + 6)
print("And wer can combine the two ,variables and math")
cheese_and_cracker(amount_of_cheese + 100, amount_of_crackers + 1000)
|
import tkinter
import tkinter.ttk as ttk
from world import world
from util_frames import ActorStatsFrame
from PIL import Image, ImageTk
import fonts
class PlayerActorFrame(ttk.Frame):
    """Panel showing the player actor's name, alignment, gender, portrait
    and stats, refreshed from the global `world` state."""
    def __init__(self, master=None):
        ttk.Frame.__init__(self, master)
        # Keep a reference to the PhotoImage so Tk does not garbage-collect it.
        self.actor_image = None
        # create widgets
        self.details_frame = ttk.Frame(self)
        self.name_label = ttk.Label(self.details_frame, font=fonts.DETAIL_NAME_FONT)
        self.alignment_label_frame = ttk.LabelFrame(self.details_frame, text='Alignment')
        self.alignment_label = ttk.Label(self.alignment_label_frame)
        self.gender_label_frame = ttk.LabelFrame(self.details_frame, text='Gender')
        self.gender_label = ttk.Label(self.gender_label_frame)
        self.image_box = ttk.Label(self, borderwidth=5, relief=tkinter.RIDGE)
        self.actor_stats_frame = ActorStatsFrame(self)
        # grid widgets
        self.details_frame.grid()
        self.alignment_label_frame.grid(column=0, row=1, sticky=tkinter.NW)
        self.gender_label_frame.grid(column=1, row=1, sticky=tkinter.NW)
        self.name_label.grid(column=0, row=0)
        self.gender_label.grid()
        self.alignment_label.grid()
        self.image_box.grid(column=2, row=0, rowspan=8)
        self.actor_stats_frame.grid(column=0, row=2, sticky=tkinter.NW)
        self.refresh()
    def refresh(self, _=None):
        """Re-read the player actor from `world` and update every widget."""
        player_actor = world.player_actor
        self.name_label.config(text=player_actor.name)
        self.alignment_label.config(text=player_actor.alignment)
        self.gender_label.config(text=player_actor.gender)
        if player_actor.image != '' and player_actor.image is not None:
            # NOTE(review): Windows-style path separators — confirm the app
            # only targets Windows before porting.
            image = Image.open('images\\Full\\' + player_actor.image)
            # Bug fix: Image.ANTIALIAS was removed in Pillow 10;
            # Image.LANCZOS is the same filter under its current name.
            image = image.resize((200, 300), Image.LANCZOS)
            tkimage = ImageTk.PhotoImage(image)
            self.actor_image = tkimage
            self.image_box.config(image=tkimage)
        self.actor_stats_frame.actor = player_actor
        self.actor_stats_frame.refresh()
class CurrentLocationFrame(ttk.Frame):
    """Panel showing the current location's name and the actors present."""
    def __init__(self, master=None):
        ttk.Frame.__init__(self, master)
        # create widgets
        self.name_label = ttk.Label(self, font=fonts.DETAIL_NAME_FONT)
        self.actor_listbox_label_frame = ttk.LabelFrame(self, text='Characters at Location')
        self.actor_listbox = tkinter.Listbox(self.actor_listbox_label_frame, activestyle='none')
        self.actor_listbox.config(exportselection='False')
        # Parallel list mapping listbox rows back to ids (see refresh()).
        self.actor_id_listbox = []
        self.area_label_frame = ttk.LabelFrame(self, text='Area')
        self.dimension_label_frame = ttk.LabelFrame(self, text='Dimension')
        self.timeframe_label_frame = ttk.LabelFrame(self, text='Timeframe')
        self.area_label = ttk.Label(self.area_label_frame)
        self.dimension_label = ttk.Label(self.dimension_label_frame)
        self.timeframe_label = ttk.Label(self.timeframe_label_frame)
        # grid widgets
        # NOTE(review): the area/dimension/timeframe widgets are created but
        # never gridded — confirm whether they are meant to be displayed.
        self.name_label.grid(column=0, row=0, sticky=tkinter.NW)
        self.actor_listbox_label_frame.grid(column=0, row=1)
        self.actor_listbox.grid()
        self.refresh()
    def refresh(self, _=None):
        """Repopulate the name label and actor list from the world state."""
        location = world.get_current_location()
        self.actor_listbox.delete(0, self.actor_listbox.size()-1)
        self.actor_id_listbox.clear()
        self.name_label.config(text=location.name)
        actor_list = world.get_actor_list_from_loc(location.location_id)
        for location_id, name in actor_list:
            self.actor_listbox.insert(tkinter.END, name)
            self.actor_id_listbox.append(location_id)
class ActionSelectionFrame(ttk.Frame):
    """Placeholder frame for action selection."""
    def __init__(self, master=None):
        # Bug fix: the original called ttk.Frame.__init__(master), i.e.
        # without `self` — it passed `master` as self, raising at runtime
        # and never initialising this frame.
        ttk.Frame.__init__(self, master)
|
"""Plot Graph to Show Unemployed people in Thailand"""
import matplotlib.pyplot as plt
def main():
    """Plot graph from people in Thailand who are unemployed"""
    # Buddhist-era years 2550-2559; one unemployment percentage per year.
    x = range(2550, 2560)
    y = [0.9961, 1.0056, 1.0836, 0.7522, 0.4894, 0.4752, 0.5152, 0.5883, 0.6165, 0.6787]
    plt.plot(x, y, color="blue", marker="o", label=("Percentage of people who are unemployed"))
    plt.title("People in Thailand who are unemployed in percentage")
    plt.xlabel("Years")
    plt.ylabel("Percent of people")
    plt.legend()
    plt.show()
main()
|
import random  # NOTE(review): unused in this snippet; kept deliberately
a = 451
b = 55465215
c = a * b + b
# Testing 0
print(c)
def main():
    """Print which of the module-level values a and b is greater."""
    # The original wrapped this in `while True` with an unconditional
    # `break` on every branch — a plain if/elif/else is equivalent.
    if a < b:
        print(str(b) + " " + "is greater")
    elif a > b:
        print(str(a) + " " + "is greater")
    else:
        print("test failed")
main()
|
#!/usr/bin/env python
# encoding=utf8
# made by zodman
import grab
import click
import urllib.parse
import logging
import slugify
from utils import get_conf
#logger = logging.getLogger('grab')
#logger.addHandler(logging.StreamHandler())
#logger.setLevel(logging.DEBUG)
def _upload_frozen(file, frozen_anime_slug, episode, images):
    """Upload a torrent (and optional screenshots) to Frozen-Layer.

    Args:
        file: path to the .torrent file to upload.
        frozen_anime_slug: Frozen-Layer anime id; aborts when falsy.
        episode: episode number/label for the upload form.
        images: optional iterable of screenshot paths to attach.

    Returns:
        The public download URL string, or None when the anime id is
        missing or the login fails.
    """
    # Local import: os.path.basename is used below but `os` is not
    # imported at module level in this file.
    import os
    cfg = get_conf("frozen_layer")
    user, passwd = cfg.get("username"), cfg.get("password")
    anime_id = frozen_anime_slug
    if not anime_id:
        click.secho("No se subio a Frozen-Layer no se encontro fronzen id", bg="red", fg="white")
        return
    #robot = grab.Grab(verbose_logging=True, debug=True)
    robot = grab.Grab(timeout=1080, connect_timeout=1080)
    robot.setup(follow_location=True)
    robot.setup(debug_post=True)
    robot.setup(user_agent="FansubTool")
    # Log in first; the success banner is only present after a good login.
    robot.go("https://www.frozen-layer.com/users/sign_in")
    robot.doc.set_input('user[login]', user)
    robot.doc.set_input('user[password]', passwd)
    robot.submit()
    if "Has conectado correctamente." in str(robot.doc.body):
        click.echo("frozen login success")
    else:
        click.secho("No se subio a Frozen-Layer login failed", bg="red", fg="white")
        return
    # fansub_id = 827 # Puya+
    fansub_id = cfg.get("fansub_id")  # PuyaSUbs
    language = 'Japones'
    subs = u"Español"
    torrent_file = grab.UploadContent(open(file, "rb").read(),
                                      filename="{}.torrent".format(file))
    desc = cfg.get("description")
    # Fill in and submit the "new download" form for this anime.
    robot.go("https://www.frozen-layer.com/descargas/nuevo/anime?id={}".format(anime_id))
    robot.doc.set_input("descarga[episodio]", u"{}".format(episode))
    robot.doc.set_input("descarga[fansub_id]", str(fansub_id))
    robot.doc.set_input("idioma", language)
    robot.doc.set_input("subtitulos", subs)
    robot.doc.set_input("descarga[descripcion]", desc)
    robot.doc.set_input("torrent", torrent_file)
    robot.submit(submit_name="login", remove_from_post=['torrent2', ])
    if "Ha habido un problema" in str(robot.doc.body):
        click.secho("No se subio a Frozen-Layer", bg="red", fg="white")
        click.echo("upload failed")
    if images:
        # Attach each screenshot through the edit-images endpoint.
        url_for_images = robot.doc.select("//div[@id='editar_imagenes']/b/a/@href").text()
        url = "https://www.frozen-layer.com{}".format(url_for_images)
        for i in images:
            ff = grab.UploadContent(open(i).read())
            img_name = os.path.basename("{}.jpg".format(slugify.slugify(u"{}".format(i))))
            files_to_post = {'Filename': img_name, 'tipo': 'descarga', 'Filedata': ff, 'Upload': 'Submit Query'}
            robot.go(url, multipart_post=files_to_post)
    url = robot.doc.url
    click.secho("frozen edit url {}".format(url), fg="blue")
    # Bug fix: the original formatted the *builtin* `id` function into the
    # return URL because the parsing lines were commented out. Recover the
    # download id from the edit-page URL's query string instead.
    parsed = urllib.parse.urlparse(url)
    download_id = urllib.parse.parse_qs(parsed.query).get("descarga_id", [""]).pop()
    return "https://www.frozen-layer.com/descargas/{}".format(download_id)
|
#!/usr/bin/env python
import rospy
from dynamic_reconfigure.server import Server
from std_msgs.msg import Bool, Int8, Int32, Float64
from hektar.msg import wheelVelocity, armTarget, Claw
from hektar.cfg import HektarConfig
# Master control header. This node takes the state of features in the course and dictates arm and wheel motion.
# Sentinel meaning "no target set".
DEFAULT = -1000
# Encoder ticks per wheel revolution (approximate).
TICKS_REV = 240 #ish
# Dead-band (ticks) within which a wheel counts as "at target".
ENCODER_ERROR = 20
# Wheel command magnitude used while dead-reckoning to an encoder target.
ENCODER_SPEED = 70
# Indices into the POSTS entries below.
SHOULDER_INDEX = 0
ELBOW_INDEX = 1
BASE_INDEX = 2
# Stone/post identifiers (indices into POSTS).
LEFT_FIRST_STONE = 0
LEFT_SECOND_STONE = 1
RIGHT_FIRST_STONE = 2
RIGHT_SECOND_STONE = 3
# (shoulder, elbow, base) arm targets for each post.
POSTS = (
    (250, 250, 90),
    (250, 250, 90),
    (250, 250, -90),
    (250, 250, -90),
)
# (shoulder, elbow) arm target for the gauntlet drop-off.
GAUNTLET = (250, 250)
class Master():
def __init__(self):
    """Create all ROS publishers, initialise run state, enable PID at speed 100."""
    self.pid_enable = rospy.Publisher('pid_enable', Bool, queue_size=1)
    self.wheels = rospy.Publisher("wheel_output", wheelVelocity, queue_size=1)
    self.speed = rospy.Publisher("set_speed", Int8, queue_size=1)
    self.claw = rospy.Publisher("grabber", Claw, queue_size = 1) # publish angle from 0-180 for claw open/close
    self.shoulder = rospy.Publisher("shoulder/setpoint", Float64, queue_size=1)
    self.elbow = rospy.Publisher("elbow/setpoint", Float64, queue_size=1)
    self.base = rospy.Publisher("base_setpoint", Int8, queue_size=1)
    # Run-state flags and encoder bookkeeping.
    self.featuresHit = 0
    self.claw_left = False
    self.claw_right = True
    self.left = False  # course side: True = left, False = right
    self.collided = False
    self.featureCallback = False
    self.encoder_left = 0
    self.encoder_right = 0
    # Encoder offsets captured at the last mode switch.
    self.begin_left = 0
    self.begin_right = 0
    # NOTE(review): duplicates the module-level POSTS tuple and appears
    # unused (other methods read POSTS directly) — confirm before removing.
    self.posts = (
        (250, 250, 90),
        (250, 250, 90),
        (250, 250, -90),
        (250, 250, -90),
    )
    self.pid_enable.publish(True)
    self.speed.publish(100)
# outputs the number of encoder ticks
# thinking that one wheel moving one tick is about 1.14 deg or 2.3 for both
# also 95*2.5 ticks makes one full revolution of the wheel.
# assumes that PID is disabled.
def send_position(self, leftWheel, rightWheel): #send revolution * TICKS_REV
    """Dead-reckon both wheels by the given encoder-tick deltas.

    Publishes bang-bang wheel commands (±ENCODER_SPEED) every 20 ms until
    each wheel is within ENCODER_ERROR ticks of its target, or about 2000
    iterations elapse (timeout).
    """
    wheel = wheelVelocity();
    leftTarget = leftWheel + self.encoder_left
    rightTarget = rightWheel + self.encoder_right
    leftDone, rightDone = False, False
    i = 0
    while leftDone == False or rightDone == False:
        if leftTarget - self.encoder_left > ENCODER_ERROR:
            wheel.wheelL = ENCODER_SPEED
        elif self.encoder_left - leftTarget > ENCODER_ERROR:
            wheel.wheelL = - ENCODER_SPEED
        else:
            # Within the dead-band: stop this wheel and mark it done.
            wheel.wheelL = 0
            leftDone = True
        if rightTarget - self.encoder_right > ENCODER_ERROR:
            wheel.wheelR = ENCODER_SPEED
        elif self.encoder_right - rightTarget > ENCODER_ERROR:
            wheel.wheelR = - ENCODER_SPEED
        else:
            wheel.wheelR = 0
            rightDone = True
        self.wheels.publish(wheel)
        i += 1
        if i > 2000:
            rospy.loginfo("Timeout")
            break
        rospy.sleep(0.02)
    rospy.loginfo("Done Reckon")
def collision_callback(self, msg):
    """Handle a collision: stop, turn in place for ~2 s, then resume PID."""
    self.collided = True
    rospy.loginfo("Collision detected!")
    self.pid_enable.publish(False)
    rospy.sleep(0.03) # solution to avoid wheel_control_output collisions
    # there is some latency from Pid to wheel control output
    # which was leading to messages being sent after stop
    self.wheels.publish(0,0)
    rospy.sleep(0.5)
    # now what - do we back up? turn around? keep driving? check we are still on tape?? spin until we find tape?
    # seems like the strat should be that we 180 if we hit someone after the beeline to go collect stones, but if we hit
    # someone during the beeline we just slightly change trajectory and continue, otherwise just wait and keep going?
    # for now, let's just turn in place.
    self.wheels.publish(40,-40)
    rospy.sleep(2) #random guess - is this enough time to turn significantly?
    self.wheels.publish(0,0)
    self.pid_enable.publish(True)
    self.collided = False
def switch_callback(self, msg): # set switch and reset the featues hit
    """Reset course state when the left/right mode switch changes.

    Re-enables PID, zeroes the feature counter, rebases both encoder
    offsets to the current counts, and restores full speed.
    """
    self.left = msg.data
    self.pid_enable.publish(True)
    self.featuresHit = 0
    self.begin_right = self.encoder_right
    self.begin_left = self.encoder_left
    self.speed.publish(100)
    if self.left:
        rospy.loginfo("switched to Left mode, reset featuresHit and Encoders")
    else:
        rospy.loginfo("switched to Right mode, reset featuresHit and Encoders")
def claw_callback_l(self, msg):
    # Latch the latest left-claw boolean from its topic
    # (presumably a limit/contact switch — confirm against the subscriber setup).
    self.claw_left = msg.data
def claw_callback_r(self, msg):
    # Latch the latest right-claw boolean from its topic.
    self.claw_right = msg.data
#Thinking that if we get hit with a robot at a fork, we should continue on.
#For now colisions are only for the IR array
def fork_analysis_callback(self, msg):
    """Handle a detected tape feature (fork/T).

    Stops the wheels, executes the scripted manoeuvre for the current
    feature count on the active course side, increments the count, and
    re-enables PID.
    """
    self.featureCallback = True
    #if self.collided: return
    rospy.loginfo("Feature %d identified. Sleeping.", self.featuresHit)
    self.pid_enable.publish(False)
    #write 0 speed
    stop = wheelVelocity()
    stop.wheelL = stop.wheelR = 0
    rospy.sleep(0.03) # solution to avoid wheel_control_output collisions
    # there is some latency from Pid to wheel control output
    # which was leading to messages being sent after stop
    self.wheels.publish(stop)
    rospy.sleep(0.3)
    # RIGHT SIDE of the course:
    if not self.left:
        if self.featuresHit == 0:
            self.wheels.publish(-10, 70) # guesses for the left turn
            rospy.sleep(2.0) # replace with encoders when ready
            self.wheels.publish(stop)
        elif self.featuresHit == 1:
            self.wheels.publish(-10, 50)
            rospy.sleep(1.5)
            self.wheels.publish(stop)
            self.speed.publish(60) #slow down once we have entered the higher circle
        elif self.featuresHit == 2: # First T: pickup stone
            self.wheels.publish(stop)
            rospy.loginfo("at the T intersection. Robot will be stopped until mode switch is changed.")
            rospy.sleep(10)
            #self.pickup_stone(RIGHT_FIRST_STONE)
    else: #Left side of the course
        if self.featuresHit == 0:
            self.wheels.publish(50, -10) # guesses for the left turn
            rospy.sleep(2.0) # replace with encoders when ready
            self.wheels.publish(stop)
        elif self.featuresHit == 1:
            self.wheels.publish(50, -10)
            rospy.sleep(1.5)
            self.wheels.publish(stop)
            self.speed.publish(60) #slow down once we have entered the higher circle
        elif self.featuresHit == 2: # First T: pickup stone
            self.wheels.publish(stop)
            rospy.loginfo("at the T intersection. Robot will be stopped until mode switch is changed.")
            rospy.sleep(30)
            #self.pickup_stone(LEFT_FIRST_STONE)
    # U-TURN AND TAPE FOLLOW BACK TO FORK TO PLACE STONE (or continue on to get another stone)
    self.featuresHit = self.featuresHit + 1
    self.pid_enable.publish(True)
    self.featureCallback=False
    def encoder_left_callback(self, msg):  # left encoder, re-based at the last mode switch
        # Raw tick count minus the baseline captured in switch_callback.
        self.encoder_left = msg.data - self.begin_left
    def encoder_right_callback(self, msg):  # right encoder, re-based at the last mode switch
        # Raw tick count minus the baseline captured in switch_callback.
        self.encoder_right = msg.data - self.begin_right
    def pickup_stone(self, post_num):
        """Run a canned arm sequence to grab a stone at POSTS[post_num].

        Moves base/shoulder/elbow to the post's preset, lowers the shoulder
        in 5-unit steps until contact, closes the claw, lifts, and returns
        the arm to the preset pose.
        """
        self.base.publish(POSTS[post_num][BASE_INDEX])
        self.shoulder.publish(POSTS[post_num][SHOULDER_INDEX])
        self.elbow.publish(POSTS[post_num][ELBOW_INDEX])
        rospy.sleep(2)
        i = POSTS[post_num][SHOULDER_INDEX]
        # NOTE(review): with `or`, the loop keeps lowering until the switch
        # closes AND i >= 400; `and` (stop on either condition) looks like
        # the intent — confirm on hardware.  Also `claw_limit_switch` is
        # not assigned by any callback visible here (only claw_left /
        # claw_right) — verify the attribute exists.
        while not self.claw_limit_switch or i < 400:
            i += 5
            self.shoulder.publish(i)
            rospy.sleep(0.05)
        self.claw.publish(180, 180) #close
        rospy.sleep(0.1)
        self.elbow.publish(POSTS[post_num][ELBOW_INDEX] + LIFT_UP_OFFSET) # lift up
        rospy.sleep(0.2)
        self.shoulder.publish(POSTS[post_num][SHOULDER_INDEX]) # put shoulder back to start position
        self.elbow.publish(POSTS[post_num][ELBOW_INDEX]) # put elbow back to start position
        # take shoulder and elbow values now
    def place_stone(self):
        """Move the arm over the gauntlet preset, open the claw to drop the
        stone, then re-command the elbow."""
        self.base.publish(0)
        self.elbow.publish(GAUNTLET[ELBOW_INDEX])
        self.shoulder.publish(GAUNTLET[SHOULDER_INDEX])
        rospy.sleep(2)
        self.claw.publish(0, 0) #open
        rospy.sleep(0.5)
        # NOTE(review): this republishes the same elbow position as above —
        # if a lift-away was intended, an offset is missing; confirm.
        self.elbow.publish(GAUNTLET[ELBOW_INDEX])
    def refresh(self):
        """Periodic housekeeping: log encoder progress and dead-reckon-stop
        near the end of the run."""
        if not self.featureCallback:
            rospy.loginfo("Left Encoder: %d, Right Encoder: %d", self.encoder_left, self.encoder_right)
        # 1600 is the dead-reckoned stop distance in encoder ticks —
        # TODO confirm the value/units against the drivetrain.
        if 1600 - self.encoder_left < 200:
            self.pid_enable.publish(False)
            rospy.sleep(0.03) # solution to avoid wheel_control_output collisions
            self.wheels.publish(0,0)
            rospy.loginfo("Stopping! Dead Reckon")
            rospy.sleep(2)
            self.pid_enable.publish(True)
    def cleanup(self):
        """Shutdown hook: re-enable PID so the controller is not left disabled."""
        rospy.sleep(0.03)
        self.pid_enable.publish(True)
def control():
    """Wire up the master control node: init ROS, subscribe every sensor
    topic to the Master callbacks, register the shutdown hook, and spin."""
    rospy.init_node('control_master', anonymous=True)
    r = rospy.Rate(10)  # NOTE(review): created but never used
    master = Master()
    rospy.Subscriber('collision', Bool, master.collision_callback, queue_size=1)
    rospy.Subscriber('line_feature', Bool, master.fork_analysis_callback, queue_size=1, tcp_nodelay=False)
    rospy.Subscriber('left_side', Bool, master.switch_callback)
    rospy.Subscriber('limit_left', Bool, master.claw_callback_l)
    rospy.Subscriber('limit_right', Bool, master.claw_callback_r)
    rospy.Subscriber('encoder_left', Int32, master.encoder_left_callback, queue_size=1)
    rospy.Subscriber('encoder_right', Int32, master.encoder_right_callback, queue_size=1)
    rospy.on_shutdown(master.cleanup)
    #while not rospy.is_shutdown() or True:
    #    rospy.sleep(2)
    #    master.pid_enable.publish(False)
    #    master.send_position(-320, 320)
    #    master.wheels.publish(0,0)
    #    master.pid_enable.publish(True)
    rospy.spin()
if __name__ == '__main__':
try:
control()
except rospy.ROSInterruptException: pass
|
#-*-coding:utf-8-*-
#__author__='maxiaohui'
from config import config
from test_device.terminal import frDevice
from adb.deviceLogger import getAdbLog
@getAdbLog
def addAdmin(name):
    """Add *name* as an administrator on the configured test device.

    Opens the settings screen of the device identified by config.deviceId
    and registers the admin; adb logging is captured by the decorator.
    """
    device34=frDevice(config.deviceId)
    device34.enterSetting()
    device34.addAdmin(name)
# Script entry: register this admin name on the device immediately on import.
addAdmin("李丽丽")
from flask import Flask, jsonify
from flask_migrate import Migrate
from .model import configure as config_db
from .serializer import configure as config_ma
def create_app():
    """Application factory: build and configure the Flask app.

    Configures the SQLite database, the model/serializer layers and
    Flask-Migrate, then registers the user and time-clock blueprints.
    """
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # config_db must run first: Migrate below reads app.db, which it sets up.
    config_db(app)
    config_ma(app)
    Migrate(app, app.db)
    # Blueprints are imported inside the factory — presumably to avoid a
    # circular import at module load time; keep them here.
    from .usuarios import bp_usuarios
    app.register_blueprint(bp_usuarios)
    from .ponto import bp_ponto
    app.register_blueprint(bp_ponto)
    return app
from rest_framework import status
from rest_framework_jwt.serializers import RefreshJSONWebTokenSerializer
from rest_framework_jwt.views import ObtainJSONWebToken, JSONWebTokenAPIView
from project.serializers import CurrentUserSerializer
from project.utils import LogUtilMixin
from django.contrib.auth import get_user_model
UserModel = get_user_model()
class MyObtainJSONWebToken(ObtainJSONWebToken, LogUtilMixin):
    """Obtain a JWT and, on success, attach the serialized current user
    to the response payload under the 'user' key."""

    def post(self, request, *args, **kwargs):
        # BUG FIX: the parent was called as post(request, args, kwargs),
        # which passed the args tuple and kwargs dict as two *positional*
        # arguments instead of forwarding them.
        response = super(MyObtainJSONWebToken, self).post(request, *args, **kwargs)
        if response.status_code == status.HTTP_200_OK:
            # A 200 means the credentials validated, so the username exists.
            user = UserModel.objects.get(username=request.data['username'])
            response.data['user'] = CurrentUserSerializer(user).data
        return response
class RefreshJSONWebToken(JSONWebTokenAPIView):
    """
    API View that returns a refreshed token (with new expiration) based on
    existing token

    If 'orig_iat' field (original issued-at-time) is found, will first check
    if it's within expiration window, then copy it to the new token
    """
    serializer_class = RefreshJSONWebTokenSerializer


# Ready-to-route view callables for urls.py.
obtain_jwt_token = MyObtainJSONWebToken.as_view()
refresh_jwt_token = RefreshJSONWebToken.as_view()
|
from flask_restful import abort, Resource
from . import db_session
from .token import Token
from flask import jsonify
def abort_if_user_not_found(app_name):
    """Abort the request with a 404 if no token row exists for *app_name*."""
    session = db_session.create_session()
    token_row = session.query(Token).filter(Token.app == app_name).first()
    if token_row is None:
        abort(404, message=f"App {app_name} not found")
class TokenResource(Resource):
    """One-shot token endpoint: returns the stored token once, then
    overwrites it so it cannot be fetched again."""

    def get(self, app_name):
        session = db_session.create_session()
        # PERF FIX: the row used to be queried twice (once inside the
        # 404 helper, once here); query once and 404 inline instead.
        app = session.query(Token).filter(Token.app == app_name).first()
        if not app:
            abort(404, message=f"App {app_name} not found")
        result = app.token
        # Mark the token as consumed so repeat requests don't leak it.
        app.token = 'has_requested'
        session.commit()
        return result
|
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provide a System class to encapsulate process execution and reporting
system information (number of CPUs present, etc).
"""
from __future__ import annotations
import multiprocessing
import os
from dataclasses import dataclass
from subprocess import PIPE, STDOUT, CompletedProcess, run as stdlib_run
from typing import Any, Dict, List, Sequence
from typing_extensions import TypeAlias
from . import DEFAULT_PROCESS_ENV
from .logger import LOG
from .ui import shell
SKIPPED_RETURNCODE = -99999
@dataclass(frozen=True)
class CPUInfo:
    """Information about a single CPU on the system."""

    # Zero-based index of the CPU.
    id: int
@dataclass(frozen=True)
class GPUInfo:
    """Information about a single GPU on the system."""

    # Zero-based index of the GPU.
    id: int

    # Free memory on the device, in bytes (as reported by NVML).
    free: int
ArgList = List[str]
EnvDict: TypeAlias = Dict[str, str]
class System:
    """A facade class for system-related functions.

    Parameters
    ----------
    dry_run : bool, optional
        If True, no commands will be executed, but a log of any commands
        submitted to ``run`` will be made. (default: False)

    debug : bool, optional
        If True, a log of commands submitted to ``run`` will be made.
        (default: False)

    """

    def __init__(self, *, dry_run: bool = False, debug: bool = False) -> None:
        self.dry_run: bool = dry_run
        self.debug = debug

    def run(
        self,
        cmd: Sequence[str],
        *,
        env: dict[str, Any] | None = None,
        cwd: str | None = None,
    ) -> CompletedProcess[str]:
        """Execute *cmd* (or only log it when dry_run is set) and return
        the completed process with combined stdout/stderr as text."""
        should_log = self.dry_run or self.debug
        if should_log:
            LOG.record(shell(" ".join(cmd)))

        if self.dry_run:
            # Nothing was run; report a sentinel return code.
            return CompletedProcess(cmd, SKIPPED_RETURNCODE, stdout="")

        return stdlib_run(
            cmd, cwd=cwd, env=env, stdout=PIPE, stderr=STDOUT, text=True
        )

    @property
    def cpus(self) -> tuple[CPUInfo, ...]:
        """A list of CPUs on the system."""
        return tuple(map(CPUInfo, range(multiprocessing.cpu_count())))

    @property
    def gpus(self) -> tuple[GPUInfo, ...]:
        """A list of GPUs on the system, including free memory information."""
        # This pynvml import is protected inside this method so that in case
        # pynvml is not installed, tests stages that don't need gpu info (e.g.
        # cpus, eager) will proceed unaffected. Test stages that do require
        # gpu info will fail here with an ImportError.
        import pynvml  # type: ignore[import]

        pynvml.nvmlInit()

        infos = []
        for index in range(pynvml.nvmlDeviceGetCount()):
            handle = pynvml.nvmlDeviceGetHandleByIndex(index)
            memory = pynvml.nvmlDeviceGetMemoryInfo(handle)
            infos.append(GPUInfo(index, memory.free))

        return tuple(infos)

    @property
    def env(self) -> EnvDict:
        """A base default environment used for process exectution."""
        return {**os.environ, **DEFAULT_PROCESS_ENV}
|
# Problem name: Permutations
# A permutation of 1..n is "beautiful" if no two adjacent elements differ
# by exactly 1.  n == 1 is trivially beautiful; n == 2 and n == 3 have no
# solution; for n >= 4, printing all even numbers followed by all odd
# numbers works (gaps inside each group are 2, and the group boundary gap
# is at least 2).
n = int(input())
if n == 1:
    print(1)
elif n == 2 or n == 3:
    print("NO SOLUTION")
else:
    for value in range(2, n + 1, 2):
        print(value, end=" ")
    for value in range(1, n + 1, 2):
        print(value, end=" ")
import pandas
import numpy
from pprint import pprint
def ID3(df, original_df, features, target, parent_node_class=None):
    """Recursively build an ID3 decision tree.

    :param df: rows reaching the current node
    :param original_df: the full dataset (majority fallback for empty nodes)
    :param features: candidate feature column names
    :param target: name of the class column
    :param parent_node_class: majority class of the parent node
    :return: a nested dict {feature: {value: subtree-or-leaf}} or a leaf value

    Bug fixes relative to the original:
    - parameter was misspelled `origina_df` while the body used
      `original_df`, raising NameError on the empty-dataset branch;
    - the empty-dataset test was `df is None`, which never fires for an
      empty DataFrame (and the pure-node branch would then IndexError),
      so the emptiness check now runs first;
    - `len(features) is 0` used identity instead of equality;
    - the per-feature information gains were overwritten each iteration
      instead of collected, so argmax always selected the first feature.
    """
    # Robustness: never consider the target column itself as a split
    # candidate (callers pass df.columns wholesale).
    features = [feature for feature in features if feature != target]

    # Base case 1: no rows left -> majority class of the original dataset.
    if len(df) == 0:
        target_feature_value = numpy.argmax(numpy.unique(original_df[target], return_counts=True)[1])
        return numpy.unique(original_df[target])[target_feature_value]
    # Base case 2: node is pure -> return its single class.
    elif len(numpy.unique(df[target])) <= 1:
        return numpy.unique(df[target])[0]
    # Base case 3: feature space exhausted -> majority class of the parent.
    elif len(features) == 0:
        return parent_node_class
    else:
        # Majority class of this node, passed down as the fallback.
        unique_values, unique_counts = numpy.unique(df[target], return_counts=True)
        parent_node_class = unique_values[numpy.argmax(unique_counts)]
        # Collect the information gain of every remaining feature and
        # split on the best one.
        feature_values = [information_gain(df, feature, target) for feature in features]
        top_feature = features[numpy.argmax(feature_values)]
        # Create the tree structure rooted at the chosen feature.
        tree = {top_feature: {}}
        # Remove the chosen feature from the remaining feature space.
        features = [feature for feature in features if feature != top_feature]
        # Grow one branch per observed value of the chosen feature.
        for value in numpy.unique(df[top_feature]):
            sub_df = df.where(df[top_feature] == value).dropna()
            tree[top_feature][value] = ID3(sub_df, df, features, target, parent_node_class)
        return tree
def calculate_entropy(col):
    """Shannon entropy (base 2) of the value distribution in *col*."""
    _, counts = numpy.unique(col, return_counts=True)
    probabilities = counts / numpy.sum(counts)
    return -numpy.sum(probabilities * numpy.log2(probabilities))
def information_gain(df, feature_name_split, target_name):
    """Information gain of splitting *df* on *feature_name_split* with
    respect to the class column *target_name*."""
    # Entropy of the whole node before splitting.
    total_entropy = calculate_entropy(df[target_name])

    # Entropy after the split, weighted by each branch's row count.
    values, counts = numpy.unique(df[feature_name_split], return_counts=True)
    total_count = numpy.sum(counts)
    weighted_entropy = 0
    for value, count in zip(values, counts):
        branch_targets = df.where(df[feature_name_split] == value).dropna()[target_name]
        weighted_entropy += (count / total_count) * calculate_entropy(branch_targets)

    return total_entropy - weighted_entropy
def predict(query, tree, default=1):
    """Classify *query* (dict of feature -> value) by walking the nested
    decision-tree dict produced by ID3.

    :param query: feature values of the sample to classify
    :param tree: nested dict {feature: {value: subtree-or-leaf}}
    :param default: value returned when a feature value was never seen
        at the corresponding node during training
    :return: the leaf value, *default* on an unseen path, or None when no
        query feature matches the tree root (preserved original behavior)
    """
    for feature in query:
        if feature in tree:
            # BUG FIX: the original performed this lookup twice (once in
            # the try block and again after it) and used a bare except
            # that could mask unrelated errors.
            try:
                result = tree[feature][query[feature]]
            except (KeyError, TypeError):
                # Feature value not present at this node.
                return default
            if isinstance(result, dict):
                # BUG FIX: propagate the caller's default into the
                # recursion (the original silently reset it to 1).
                return predict(query, result, default)
            return result
def df_split(df):
    """Split *df* into (training, testing): rows 0..84 train, the rest test.

    Note: the split is positional; callers typically reset_index afterwards.
    """
    return df.iloc[:85], df.iloc[85:]
def test(df, tree):
    """Predict every row of *df* with *tree*, print per-row results,
    write them to predictions.csv and report the overall accuracy."""
    # All columns except the last ("class") form the query for each row.
    queries = df.iloc[:, :-1].to_dict(orient="records")
    # Accumulate one prediction per row.
    predicted = pandas.DataFrame(columns=["predict"])
    for row in range(len(df)):
        predicted.loc[row, "predict"] = int(predict(queries[row], tree, 1.0))
        print("Row" + str(row) + " class: " + str(df.loc[row, "class"]))
        print("Row" + str(row) + " prediction: " + str(predicted.loc[row, "predict"]))
    # Persist the predictions next to the original columns.
    final_predictions = predicted["predict"]
    df.join(final_predictions).to_csv('predictions.csv')
    correct_predictions = (numpy.sum(predicted["predict"] == df["class"]) / len(df)) * 100
    print("Total rows: ", len(df))
    print("Total predicted correctly: ", numpy.sum(predicted["predict"] == df["class"]))
    print('The prediction accuracy is: ', correct_predictions, '%')
def main():
    """Load the zoo dataset, train an ID3 tree on the first 85 rows,
    pretty-print it, and evaluate on the remaining rows."""
    #import the df
    df = pandas.read_csv('animal_classifier.csv',
                         names=['animal_name','hair','feathers','eggs','milk',
                                'airbone','aquatic','predator','toothed','backbone',
                                'breathes','venomous','fins','legs','tail','domestic','catsize','class',])
    #drop name as it is not a classifier
    df=df.drop('animal_name',axis=1)
    data_split = df_split(df)
    training_df = data_split[0].reset_index(drop=True)
    testing_df = data_split[1].reset_index(drop=True)
    # NOTE(review): the feature list passed here still contains 'class'
    # (the target column) — confirm ID3 excludes it from split candidates.
    tree = ID3(training_df,training_df,training_df.columns.values.tolist(),"class")
    pprint(tree)
    test(testing_df,tree)
if __name__ == "__main__":
main() |
# dataset, labels = data.from_file()
#
# model.fully_connected(dataset, labels, [20, 40, 60, 20, 12, 6], 50001)
from learn import data
from learn import model
if __name__ == '__main__':
    # Train a fully-connected network on the prepared dataset.
    # [20, 40, 60, 20, 16] are the layer sizes and 100001 the step count —
    # presumably iterations; confirm against model.fully_connected_.
    dataset, labels = data.get_dataset()
    model.fully_connected_(dataset, labels, [20, 40, 60, 20, 16], 100001)
import torch
from torch import nn
from torch.nn import functional as F
from typing import Optional
def label_smoothed_nll_loss(
lprobs: torch.Tensor, target: torch.Tensor, epsilon: float, ignore_index=None, reduction="mean", dim=-1
) -> torch.Tensor:
"""
Source: https://github.com/pytorch/fairseq/blob/master/fairseq/criterions/label_smoothed_cross_entropy.py
:param lprobs: Log-probabilities of predictions (e.g after log_softmax)
:param target:
:param epsilon:
:param ignore_index:
:param reduction:
:return:
"""
if target.dim() == lprobs.dim() - 1:
target = target.unsqueeze(dim)
if ignore_index is not None:
pad_mask = target.eq(ignore_index)
target = target.masked_fill(pad_mask, 0)
nll_loss = -lprobs.gather(dim=dim, index=target)
smooth_loss = -lprobs.sum(dim=dim, keepdim=True)
# nll_loss.masked_fill_(pad_mask, 0.0)
# smooth_loss.masked_fill_(pad_mask, 0.0)
nll_loss = nll_loss.masked_fill(pad_mask, 0.0)
smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0)
else:
nll_loss = -lprobs.gather(dim=dim, index=target)
smooth_loss = -lprobs.sum(dim=dim, keepdim=True)
nll_loss = nll_loss.squeeze(dim)
smooth_loss = smooth_loss.squeeze(dim)
if reduction == "sum":
nll_loss = nll_loss.sum()
smooth_loss = smooth_loss.sum()
if reduction == "mean":
nll_loss = nll_loss.mean()
smooth_loss = smooth_loss.mean()
eps_i = epsilon / lprobs.size(dim)
loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
return loss
class SoftCrossEntropyLoss(nn.Module):
    """Drop-in replacement for torch.nn.CrossEntropyLoss with label_smoothing

    Args:
        smooth_factor: Factor to smooth target (e.g. if smooth_factor=0.1 then [1, 0, 0] -> [0.9, 0.05, 0.05])

    Shape
         - **y_pred** - torch.Tensor of shape (N, C, H, W)
         - **y_true** - torch.Tensor of shape (N, H, W)

    Reference
        https://github.com/BloodAxe/pytorch-toolbelt
    """

    __constants__ = ["reduction", "ignore_index", "smooth_factor"]

    def __init__(
        self,
        reduction: str = "mean",
        smooth_factor: Optional[float] = None,
        ignore_index: Optional[int] = -100,
        dim: int = 1,
    ):
        super().__init__()
        # Stored verbatim and forwarded to label_smoothed_nll_loss.
        self.smooth_factor = smooth_factor
        self.ignore_index = ignore_index
        self.reduction = reduction
        self.dim = dim

    def forward(self, y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
        """Compute the smoothed cross entropy of *y_pred* against *y_true*."""
        log_prob = F.log_softmax(y_pred, dim=self.dim)
        return label_smoothed_nll_loss(
            log_prob,
            y_true,
            epsilon=self.smooth_factor,
            ignore_index=self.ignore_index,
            reduction=self.reduction,
            dim=self.dim,
        )
import os
import csv
import psycopg2
csv.field_size_limit(100000000)

# Connection parameters come from the container environment.
db_name = os.environ["POSTGRES_DB"]
db_user = os.environ["POSTGRES_USER"]
db_psw = os.environ["POSTGRES_PASSWORD"]

conn = psycopg2.connect(
    "host=/var/run/postgresql/ dbname={} user={} password={}".format(
        db_name, db_user, db_psw))
cur = conn.cursor()


def _populate(path, insert_sql, label):
    """Stream one CSV file (skipping its header row) into the database
    via the parameterized *insert_sql*, commit, and report completion.

    Refactor: the original repeated this open/skip/insert/commit/print
    sequence four times; behavior (including per-table commit and the
    printed messages) is unchanged.
    """
    with open(path, 'r') as f:
        reader = csv.reader(f)
        next(reader)  # skip the header row
        for row in reader:
            cur.execute(insert_sql, row)
    conn.commit()
    print(label + " table populated")


_populate('./data/categories_table.csv',
          "INSERT INTO categories VALUES (%s, %s, %s, %s, %s)",
          "Categories")
_populate('./data/attributes_table.csv',
          "INSERT INTO attributes VALUES (%s, %s, %s, %s)",
          "Attributes")
_populate('./data/images_table.csv',
          "INSERT INTO images VALUES (%s, %s, %s, %s, %s)",
          "Images")
_populate('./data/segmentation_table.csv',
          "INSERT INTO segmentation (img_id, encoded_pixels, class_id, attribute_id) VALUES (%s, %s, %s, %s)",
          "Segmentation")

cur.close()
conn.close()
print("Connection closed")
|
from selectorlib import Extractor
import requests
import json
ext = Extractor.from_yaml_file('css_format.yml')
def scrape(url):
    """Download *url* with browser-like headers and extract product data
    using the selectorlib template loaded at module level.

    Returns the dict produced by the extractor (may contain None values
    when selectors do not match — e.g. on a CAPTCHA page).
    """
    # Headers mimic a real Chrome session; Amazon blocks bare requests.
    headers = {
        'dnt': '1',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'navigate',
        'sec-fetch-user': '?1',
        'sec-fetch-dest': 'document',
        'referer': 'https://www.amazon.com/',
        'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8',
    }
    print("Downloading %s"%url)
    r = requests.get(url, headers=headers)
    # NOTE(review): r.status_code is not checked — a blocked request will
    # silently extract from an error page.
    return ext.extract(r.text)
print("What are you are looking for!!!!")
item=input()
a = "https://www.amazon.com/s?k=" +item
data=scrape(a)
if data:
for product in data['products']:
product['search_url'] = a
print("Product: %s"%product['title'])
print(json.dumps(product, indent=4)) |
import re
import pytest
from unittest import mock
import builtins
def nested_lists():
    """Read n (name, score) pairs from stdin and return the alphabetically
    sorted names of students with the second-lowest score.

    Input format: first line is n, then alternating name / score lines.
    Raises IndexError when fewer than two distinct scores exist (same as
    the original's double pop()).

    Cleanup: the original shadowed the builtin ``list``, kept a redundant
    sort of the records by score, and extracted the second-lowest score by
    popping twice from a reverse-sorted list.
    """
    records = []
    scores = set()
    for _ in range(int(input())):
        name = input()
        score = float(input())
        records.append((name, score))
        scores.add(score)
    # Second-smallest distinct score.
    second_lowest = sorted(scores)[1]
    return sorted(name for name, score in records if score == second_lowest)
def test_nested_lists_1():
    # Berry and Harry tie on the second-lowest score (37.21); Tina's 37.2 is lowest.
    with mock.patch.object(builtins, 'input', side_effect=['5', 'Harry','37.21','Berry','37.21','Tina','37.2','Akriti','41','Harsh','39']):
        assert nested_lists() == ['Berry','Harry']
def test_nested_lists_2():
    # Single holder of the second-lowest score (36).
    with mock.patch.object(builtins, 'input', side_effect=['4', 'Prashant','32','Pallavi','36','Dheeraj','39','Shivam','40']):
        assert nested_lists() == ['Pallavi']
def test_nested_lists_3():
    # Duplicate scores: second-lowest distinct score is 20, held by two students.
    with mock.patch.object(builtins, 'input', side_effect=[ '5', 'Harsh','20','Berria','20','Varun','19','Kakunami','19','Vikas','21']):
        assert nested_lists() == ['Berria','Harsh']
|
import pygame
import os
import time
import random
pygame.init()
# Maximum height and width of the game surface
WIDTH, HEIGHT = (750, 750)
# To create the display surface
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
# Set the surface caption
pygame.display.set_caption("MyGame")
# Background image
BG = pygame.image.load(os.path.join("assets", "background-black.png"))
# Scaling the background image to max width and height as game surface
BG = pygame.transform.scale(BG, (WIDTH, HEIGHT))
# Enemy ship sprites, one per color
RED_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_red_small.png"))
GREEN_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_green_small.png"))
BLUE_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_blue_small.png"))
# Player ship image
YELLOW_SPACE_SHIP = pygame.image.load(os.path.join("assets", "pixel_ship_yellow.png"))
# Laser sprites, matched to ship colors (yellow = player)
RED_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_red.png"))
GREEN_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_green.png"))
BLUE_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_blue.png"))
YELLOW_LASER = pygame.image.load(os.path.join("assets", "pixel_laser_yellow.png"))
# Generalized class
class Ship:
    """Base class for player and enemy ships: position, health, sprite,
    and the list of lasers the ship has fired."""

    # Number of frames that must elapse between two shots.
    COOLDOWN = 30

    def __init__(self, x, y, health=100):
        self.x = x
        self.y = y
        self.health = health
        self.ship_img = None
        self.laser_img = None
        # keep track of the lasers shot by this ship
        self.lasers = []
        self.cool_down_counter = 0

    def draw(self, window):
        """Blit the ship and all of its live lasers onto *window*."""
        window.blit(self.ship_img, (self.x, self.y))
        for laser in self.lasers:
            laser.draw(window)

    def move_lasers(self, vel, obj):
        """Advance all lasers by *vel*; drop off-screen ones and apply
        damage to *obj* on collision."""
        self.cooldown()
        # BUG FIX: iterate over a copy — removing from the list being
        # iterated skips the element that follows each removal.
        for laser in self.lasers[:]:
            laser.move(vel)
            if laser.off_screen(HEIGHT):
                self.lasers.remove(laser)
            elif laser.collision(obj):
                obj.health -= 10
                self.lasers.remove(laser)

    # used to advance the timer that gates the next laser shot
    def cooldown(self):
        # once the counter reaches COOLDOWN, shooting is allowed again
        if self.cool_down_counter >= self.COOLDOWN:
            self.cool_down_counter = 0
        # a non-zero counter keeps ticking up once per frame
        elif self.cool_down_counter > 0:
            self.cool_down_counter += 1

    def shoot(self):
        """Fire a laser from the ship's position if the cooldown expired."""
        if self.cool_down_counter == 0:
            laser = Laser(self.x, self.y, self.laser_img)
            self.lasers.append(laser)
            self.cool_down_counter = 1

    def get_height(self):
        # BUG FIX: height and width were swapped in these two getters.
        return self.ship_img.get_height()

    def get_width(self):
        return self.ship_img.get_width()
class Laser:
    """A single laser bolt: sprite, position, and a pixel mask used for
    collision tests."""

    def __init__(self, x, y, img):
        self.x = x
        self.y = y
        self.img = img
        self.mask = pygame.mask.from_surface(self.img)

    def draw(self, window):
        """Blit the bolt onto *window* at its current position."""
        window.blit(self.img, (self.x, self.y))

    def move(self, vel):
        """Shift the bolt vertically by *vel* pixels (sign = direction)."""
        self.y += vel

    def off_screen(self, height):
        """True once the bolt has left the vertical band [0, height]."""
        return self.y < 0 or self.y > height

    def collision(self, obj):
        """Pixel-accurate hit test against *obj*."""
        return collide(self, obj)
'''
Player():
draw() --> ship.draw()
Move_laser() --> ship.cool_down()
health_bar()
Ship ---- > ship.shoot()
'''
# Player class
class Player(Ship):
    """The user-controlled ship: yellow sprite, health bar, and lasers
    that destroy enemies on contact."""

    def __init__(self, x, y, health=100):
        # BUG FIX: the health argument was dropped (super().__init__(x, y)),
        # so a custom starting health never reached Ship.health.
        super().__init__(x, y, health)
        self.ship_img = YELLOW_SPACE_SHIP
        self.laser_img = YELLOW_LASER
        # masking takes only the visible pixels and ignores the rest
        self.mask = pygame.mask.from_surface(self.ship_img)
        self.max_health = health

    def move_lasers(self, vel, objs):
        """Advance this player's lasers; remove any enemy in *objs* that
        a laser hits (and consume the laser)."""
        self.cooldown()
        # BUG FIX: both loops used to mutate the list they were iterating,
        # which silently skips elements; iterate over copies instead.
        for laser in self.lasers[:]:
            laser.move(vel)
            if laser.off_screen(HEIGHT):
                self.lasers.remove(laser)
            else:
                for obj in objs[:]:
                    if laser.collision(obj):
                        objs.remove(obj)
                        if laser in self.lasers:
                            self.lasers.remove(laser)

    def draw(self, window):
        """Render the ship (via Ship.draw) plus the health bar below it."""
        super().draw(window)
        self.healthbar(window)

    def healthbar(self, window):
        """Red background bar with a green overlay proportional to the
        remaining health, drawn 10 px under the sprite."""
        pygame.draw.rect(window, (255, 0, 0), (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width(), 10))
        pygame.draw.rect(window, (0, 255, 0), (self.x, self.y + self.ship_img.get_height() + 10, self.ship_img.get_width() * (self.health / self.max_health), 10))
'''
Enemy();
move()
shoot() ---> Laser()
move_laser()
Ship() ---> draw()
'''
class Enemy(Ship):
    """Computer-controlled ship; *color* selects the sprite/laser pair."""

    COLOR_MAP = {
        "red": (RED_SPACE_SHIP, RED_LASER),
        "blue": (BLUE_SPACE_SHIP, BLUE_LASER),
        "green": (GREEN_SPACE_SHIP, GREEN_LASER)
    }

    def __init__(self, x, y, color, health=100):
        super().__init__(x, y, health)
        # Pick the sprite and matching laser image for this colour.
        self.ship_img, self.laser_img = self.COLOR_MAP[color]
        self.mask = pygame.mask.from_surface(self.ship_img)

    def move(self, vel):
        """Enemies only ever drift straight down the screen."""
        self.y += vel

    def shoot(self):
        """Fire a laser (offset 20 px left to line up with the sprite)
        if the cooldown has expired."""
        if self.cool_down_counter == 0:
            self.lasers.append(Laser(self.x - 20, self.y, self.laser_img))
            self.cool_down_counter = 1
def main():
    """Run one game session: spawn enemy waves, process input, move
    everything once per frame at 60 FPS until the player loses or quits."""
    # Flag to track the game status
    run = True
    # frame to be rendered per second
    FPS = 60
    # pygame clock initialisation
    clock = pygame.time.Clock()
    # Initial level of the game
    level = 0
    # Total lives of the player
    lives = 5
    # Font surface to render the level and lives
    main_font = pygame.font.SysFont('comicsans', 50)
    # Font surface to render the lost message
    lost_font = pygame.font.SysFont('comicsans', 60)
    # Player declaration
    player = Player(375, 600)
    # Player movement velocity
    player_vel = 5
    # laser movement velocity
    laser_vel = 5
    # Track of total enemy created
    enemies = []
    # Update number of enemy after a level
    wave_length = 0
    # Enemy spaceship velocity
    enemy_vel = 1
    # Flag to Tracking the game status of the player on basis of the health
    lost = False
    # Counting the lost
    lost_count = 0

    # Function to render the game objects onto the game surface
    def render_window():
        # Creating the font surface to render onto the game surface
        # For Lives rendering
        lives_label = main_font.render(f"Lives : {lives}", 1, (255, 255, 255))
        # For Level rendering
        level_label = main_font.render(f"Level : {level}", 1, (255, 255, 255))
        # blit the background image to the game surface
        WIN.blit(BG, (0, 0))
        # blit the lives status to the game screen/surface
        WIN.blit(lives_label, (10, 10))
        # blit the level status to the game screen/surface
        WIN.blit(level_label, (WIDTH - level_label.get_width() - 10, 10))
        # Rendering the player character to the surface
        player.draw(WIN)
        # TO render the enemy onto the game surface
        # This will draw the enemy if exist in the enemies list
        for enemy in enemies:
            # Calling the Enemy.draw function of the Enemy class
            enemy.draw(WIN)
        # If the lost flag is toggled ---> player lost
        if lost:
            # Creating the lost font surface to be rendered on the screen after the lost of the game
            lost_label = lost_font.render("You Lost!!", 1, (255, 255, 255))
            # Render the lost font surface to the game surface
            WIN.blit(lost_label, (WIDTH/2 - lost_label.get_width()/2, 350))
        # used to update the whole screen per frame
        pygame.display.update()

    def player_activity():
        # Used to get the activity of the user/player
        keys = pygame.key.get_pressed()
        # <-- left movement
        if keys[pygame.K_LEFT] and player.x - player_vel > 0:
            player.x -= player_vel
        # --> right
        if keys[pygame.K_RIGHT] and player.x + player_vel + player.get_width() < WIDTH:
            player.x += player_vel
        # ^^^^^ up movement
        if keys[pygame.K_UP] and player.y - player_vel > 0:
            player.y -= player_vel
        # Down movement
        if keys[pygame.K_DOWN] and player.y + player_vel + player.get_height() + 10 < HEIGHT:
            player.y += player_vel
        # Used to fire the laser shoots
        if keys[pygame.K_SPACE]:
            player.shoot()

    # Main Game Loop
    while run:
        # sets the number frame to be loaded per second and run this loop 60 time per second
        clock.tick(FPS)
        # used to render all the surfaces onto the screen
        render_window()
        # Check to track the game status as lost or win
        if lives <= 0 or player.health <= 0:
            # Toggle the lost flag
            lost = True
            # increase the lost count
            lost_count += 1
        # if the player lost toggle the game(run) for 3 seconds
        if lost:
            # to display the lost font surface for 3 seconds
            if lost_count > FPS * 3:
                run = False
            else:
                continue
        # Used to get the activity of the user/player
        for event in pygame.event.get():
            # Trigger when the QUIT is pressed
            if event.type == pygame.QUIT:
                # run = False
                # NOTE(review): quit() exits the interpreter without calling
                # pygame.quit(); setting run = False would be the clean path.
                quit()
            # NOTE(review): leftover debug output — prints every event each frame.
            print(event)
        # To level up when NO enemy left
        if len(enemies) == 0:
            # Level up by 1
            level += 1
            # adding 5 additional enemy
            wave_length += 5
            # Declaration of enemy as per wave_length
            for i in range(wave_length):
                enemy = Enemy(random.randrange(50, WIDTH - 100),
                              random.randrange(-1500, -100),
                              random.choice(["red", "blue", "green"]))
                enemies.append(enemy)
        player_activity()
        '''
        Player():
            draw() --> ship.draw()
            Move_laser() --> ship.cool_down()
            health_bar()
            Ship() ---- > ship.shoot()
        Enemy():
            move()
            shoot() ---> Laser()
            Ship() ---> move_laser()
            Ship() ---> draw()
        '''
        for enemy in enemies[:]:
            # moving enemy itself
            enemy.move(enemy_vel)
            # moving enemy laser
            enemy.move_lasers(laser_vel, player)
            # setting the probability to shoot a laser
            if random.randrange(0, 2 * 60) == 1:
                enemy.shoot()
            # Check for collision of the enemy and the player
            if collide(enemy, player):
                # if collide decrease the player health by 10
                player.health -= 10
                # Deleting the enemy who collide with the player
                enemies.remove(enemy)
            # destroying the enemy if the enemy passes the MAX_HEIGHT
            elif enemy.y + enemy.get_height() > HEIGHT:
                lives -= 1
                enemies.remove(enemy)
        # used to fire the laser and check the collision of the player laser with the enemy object
        player.move_lasers(-laser_vel, enemies)
# check if the objects collide or not
def collide(obj1, obj2):
    """True when the pixel masks of *obj1* and *obj2* overlap; the masks
    are compared at the offset between the objects' top-left corners."""
    offset = (obj2.x - obj1.x, obj2.y - obj1.y)
    return obj1.mask.overlap(obj2.mask, offset) is not None
def main_menu():
    """Show the start screen until the window is closed; a mouse click
    starts a game session (main()), returning here when it ends."""
    # Initialisation of the font surface
    title_font = pygame.font.SysFont("comicsans", 70)
    # Used to show main menu after complietion of the game
    run = True
    while run:
        # Blit the background surface to the screen surface
        WIN.blit(BG, (0, 0))
        # Setting the font to be rendered on the font surface
        title_label = title_font.render("Press the mouse to begin...", 1, (255, 255, 255))
        # blit the font on the game surface
        WIN.blit(title_label, (WIDTH/2 - title_label.get_width()/2, 350))
        # used to update the screen surface at every second according to the FPS
        pygame.display.update()
        # loop to handle the start or the close of the game
        for event in pygame.event.get():
            # triggered when the game screen is closed --> close the game
            if event.type == pygame.QUIT:
                run = False
            # Triggered when the mouse is clicked --> Start game
            if event.type == pygame.MOUSEBUTTONDOWN:
                main()
    # Quit the pygame instance
    pygame.quit()
# Starting the game main menu
main_menu()
'''
Created on Jun 18, 2013
@author: jsflax
'''
|
import pandas as pd
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
"""take structure factor and hydrophobic factor as input value for
our kernel function"""
tm = pd.read_csv("merged_file.csv")
df = tm.drop_duplicates()
train_data = []
label = []
id = []
for each in df.values:
train_data.append(each[1:4].tolist())
id.append(each[0])
label.append(each[-1])
"""
this part is for generate the wrongly classified proteins
"""
# clf = svm.SVC(kernel='poly',C = 10000)
# scores = cross_val_score(clf,train_data,label,cv=10)
# print("Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2))
# print(id)
# clf = SVC(kernel='linear',C = 10)
# clf.fit(train_data, label)
# predicted_label = clf.predict(train_data)
# for x,y,id in zip(label,predicted_label,id):
# if x != y:
# print(x,y,id)
"""
cross validation for best parameters
"""
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], 'C': [1, 10, 100, 1000]}, {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(), tuned_parameters, cv=10,
scoring='%s_macro' % score)
clf.fit(train_data, label)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
means = clf.cv_results_['mean_test_score']
stds = clf.cv_results_['std_test_score']
for mean, std, params in zip(means, stds, clf.cv_results_['params']):
print("%0.3f (+/-%0.03f) for %r"
% (mean, std * 2, params))
|
import logging
from six.moves import input
from django.core.management import BaseCommand, CommandError, call_command
from elasticsearch_dsl import connections
from stretch import stretch_app
class Command(BaseCommand):
    """Create or update the Elasticsearch indices from Stretch indices.

    Thin wrapper that forwards to the ``stretch update_indices``
    management command.
    """

    can_import_settings = True

    def add_arguments(self, parser):
        # Repeatable flag: each occurrence appends one index name.
        parser.add_argument(
            '--indices',
            action='append',
            default=[],
            help='One or more indices to operate on, by index name'
        )

    def handle(self, *args, **options):
        selected = options.get('indices')
        call_command('stretch', 'update_indices', indices=selected)
|
from peewee import (SqliteDatabase, Model, CharField, IntegerField, TextField
)
db = SqliteDatabase('matches.db')
class Match(Model):
    """Peewee model holding one League-of-Legends match record."""
    # each match create with this first block of data
    # data is in player matchlist
    platformId = CharField()
    gameId = IntegerField(unique=True)
    champion = IntegerField()
    queue = IntegerField()
    season = IntegerField()
    timestamp = IntegerField()
    role = CharField()
    lane = CharField()
    # this is only accesible when searched by gameid
    # add only if needed
    # this may need: default = None
    gameCreation = IntegerField(null = True)
    gameDuration = IntegerField(null = True)
    queueId = IntegerField(null = True)
    mapId = IntegerField(null = True)
    seasonId = IntegerField(null = True)
    gameVersion = CharField(null = True)
    gameMode = CharField(null = True)
    gameType = CharField(null = True)
    # team/participant payloads stored as serialized text blobs
    teams = TextField(null = True)
    participants = TextField(null = True)
    participantIdentities = TextField(null = True)
    class Meta:
        # bind the model to the module-level SQLite database
        database = db
        legacy_table_names=False
        # composite unique index: one row per (platform, game) pair
        indexes = (
            (('platformId', 'gameId'), True),
        )
def initialize():
    """Open the SQLite connection and create any missing tables.

    ``safe=True`` turns table creation into a no-op when the table
    already exists.
    """
    tables = [Match]
    db.connect()
    db.create_tables(tables, safe=True)
# Script entry point: prepare the SQLite schema when run directly.
if __name__ == '__main__':
    initialize()
|
# -*- encoding: utf-8 -*-
from __future__ import absolute_import
from collections import Mapping
from flask.ext.mail import Message
from flamaster.extensions import mail, mongo
from mongoengine import StringField, ListField, EmailField, FileField
from .decorators import classproperty
from .utils import plural_underscored
class BaseMixin(object):
    """Shared helpers for the document models (export, create, update)."""

    def as_dict(self, include=None, exclude=('password',)):
        """Build a dictionary of the model's value-properties.

        :param include: extra attribute names to export on top of the
            mapped fields.
        :param exclude: attribute names to omit; the default skips
            ``password`` (immutable tuple instead of a mutable list
            default).
        Callable attribute values are invoked and their result exported.
        """
        # list(...) keeps this working on both py2 lists and py3 views
        document_fields = list(self._fields)
        exportable_fields = (include or []) + document_fields
        # internal bookkeeping fields are never exported
        exclude = ['_ns', '_int_id', '_class'] + list(exclude or [])
        exportable_fields = set(exportable_fields) - set(exclude)

        result = dict()
        for field in exportable_fields:
            value = getattr(self, field)
            if hasattr(value, '__call__'):
                value = value()
            result[field] = value
        return result

    @classmethod
    def create(cls, **kwargs):
        """Instantiate with *kwargs* and persist immediately."""
        return cls(**kwargs).save()

    def update(self, **kwargs):
        """Set the given attributes, saving when mongo-mapped."""
        instance = self._setattrs(**kwargs)
        if isinstance(self, mongo.Document):
            return instance.save()
        else:
            return instance

    def _setattrs(self, **kwargs):
        # ``items()`` works on py2 and py3 (``iteritems`` is py2-only)
        for k, v in kwargs.items():
            if k.startswith('_'):
                raise ValueError('Underscored values are not allowed')
            setattr(self, k, v)
        return self

    @classmethod
    def convert(cls, data):
        """Coerce *data* to an instance of *cls*.

        params:
            :data: ``dict``-like mapping or an existing *cls* instance
        returns:
            *cls* instance, or ``None`` when *data* is neither
        """
        if isinstance(data, cls):
            return data
        elif isinstance(data, Mapping):
            return cls(**data)
        else:
            return None
class DocumentMixin(BaseMixin):
    """Base mixin for all mongodb models.

    Derives the mongo collection name from the class name.
    """

    @classproperty
    def __meta__(cls):
        # e.g. ``StoredMail`` -> ``stored_mails``
        collection_name = plural_underscored(cls.__name__)
        return {'collection': collection_name}
class StoredMail(DocumentMixin, mongo.Document):
    """Mongo-persisted outgoing e-mail, deleted after a successful send."""
    subject = StringField(required=True)
    recipients = ListField(EmailField())
    # ids used to look up FileModel documents in send()
    attachments = ListField()
    html_body = StringField()
    text_body = StringField()

    def send(self):
        """Build a flask-mail Message, attach stored files, send, delete.

        NOTE(review): the document is deleted only after mail.send()
        returns; an exception leaves it stored (implicit retry).
        """
        msg = Message(self.subject, recipients=list(self.recipients),
                      body=self.text_body, html=self.html_body)
        if self.attachments:
            for file_id in self.attachments:
                file_instance = FileModel.find_one(id=file_id)
                msg.attach(file_instance.name,
                           file_instance.image.contentType,
                           file_instance.image.read()
                )
        mail.send(msg)
        self.delete()
class FileModel(mongo.Document, DocumentMixin):
    """ Wrapper around MongoDB gridfs session and file storage/retrieve
        actions
    """
    # gridfs-backed file contents
    image = FileField(required=True)
    # human-readable file name (used by StoredMail attachments)
    name = StringField()

    def __unicode__(self):
        return self.name

    @classmethod
    def store(cls, image, content_type, **kwargs):
        """Persist *image* bytes into gridfs and return the saved doc."""
        instance = cls(name=kwargs.get('name'))
        instance.image.put(image, content_type=content_type)
        instance.save()
        return instance

    @classmethod
    def create(cls, image, content_type, **kwargs):
        # NOTE: intentionally overrides BaseMixin.create with a
        # different signature; delegates to store()
        return cls.store(image, content_type, **kwargs)

    @classmethod
    def get(cls, id):
        """ Get mognodb stored file by its unique identifier
        """
        instance = cls.objects(pk=id).get_or_404()
        return instance.image

    @classmethod
    def find_one(cls, **kwargs):
        # returns None when no document matches
        return cls.objects(**kwargs).first()

    def get_file(self):
        """ Return file-like object bound to this class from the gridfs storage
        """
        return self.image
|
import json
import socket
import hashlib
import rsa
class Connector:
    """UDP client for the chat server with optional RSA encryption.

    A fresh 2048-bit keypair is generated per connector; the server's
    public key is fetched during construction, after which all payloads
    are RSA-encrypted JSON.
    """
    def __init__(self):
        # 0.5 s receive timeout; a timeout triggers a resend (see
        # send_message_to_server)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.settimeout(0.5)
        self.server_ip = 'localhost'
        self.port = 2137
        self.login = ''
        # -1 means "not authenticated yet"
        self.token = -1
        self.server_public_key = None
        self.my_public_key = None
        self.my_private_key = None
        (self.my_public_key, self.my_private_key) = rsa.newkeys(2048)
        self.get_server_public_key()
        print(self.server_public_key)

    def get_server_public_key(self):
        """Exchange public keys with the server (sent in plaintext)."""
        # key is transferred as its (n, e) integer pair
        public_key = self.send_message_to_server({'command': 'get_public_key',
                                                  'client_public_key': [self.my_public_key.n,
                                                                        self.my_public_key.e]})['public_key']
        self.server_public_key = rsa.PublicKey(public_key[0], public_key[1])

    def set_token(self, _token):
        # session token handed out by the server after login
        self.token = _token

    def send_message_to_server(self, data_to_send):
        """Send a JSON command and return the decoded JSON reply.

        Encrypts with the server key once known, falling back to
        plaintext when encryption fails (e.g. payload too large for
        RSA -- TODO confirm intent).
        NOTE(review): a receive timeout retries via unbounded recursion;
        a permanently unreachable server overflows the stack.
        """
        if self.server_public_key is None:
            self.sock.sendto(json.dumps(data_to_send).encode('utf-8'), (self.server_ip, self.port))
        else:
            try:
                self.sock.sendto(rsa.encrypt(json.dumps(data_to_send).encode('utf-8'), self.server_public_key),
                                 (self.server_ip, self.port))
            except:
                self.sock.sendto(json.dumps(data_to_send).encode('utf-8'), (self.server_ip, self.port))
        try:
            data, address = self.sock.recvfrom(1024)
        except:
            # bare except also swallows KeyboardInterrupt -- NOTE(review)
            return self.send_message_to_server(data_to_send)
        if self.server_public_key is None:
            print('t')
            return json.loads(data.decode('utf-8'))
        else:
            print('a')
            try:
                return json.loads(rsa.decrypt(data, self.my_private_key).decode('utf-8'))
            except:
                # server may answer some commands unencrypted
                return json.loads(data.decode('utf-8'))

    def log_in(self, login, password):
        """Authenticate; the password is sent as a SHA-512 hex digest."""
        return self.send_message_to_server({
            "command": "login",
            "login": login,
            "password": hashlib.sha512(password.encode()).hexdigest()
        })

    def registration(self, login, password, email):
        """Create a new account (password hashed client-side)."""
        return self.send_message_to_server({
            "command": "registration",
            "login": login,
            "password": hashlib.sha512(password.encode()).hexdigest(),
            "email": email
        })

    def log_out(self):
        """End the current session identified by login/token."""
        return self.send_message_to_server({
            "command": "logout",
            "login": self.login,
            "token": self.token
        })

    def get_list_of_friends(self):
        """Fetch the authenticated user's friend list."""
        return self.send_message_to_server({
            "command": "get_list_of_friends",
            "login": self.login,
            "token": self.token
        })

    def forgot_password(self, login):
        """Start the password-reset flow for *login*."""
        return self.send_message_to_server({
            "command": "forgot_password",
            "login": login
        })

    def change_password(self, login, code, new_password):
        """Complete password reset using the emailed *code*.

        NOTE(review): unlike log_in/registration the new password is
        sent unhashed -- confirm against server expectations.
        """
        return self.send_message_to_server({
            "command": "change_password",
            "login": login,
            "code": code,
            "new_password": new_password
        })

    def delete_friend(self, friend_login):
        """Remove *friend_login* from the friend list."""
        return self.send_message_to_server({
            "command": "remove_friend",
            "login": self.login,
            "token": self.token,
            "friend_login": friend_login
        })

    def invite_friend(self, friend_login):
        """Send a friend request to *friend_login*."""
        return self.send_message_to_server({
            "command": "invite_friend",
            "login": self.login,
            "token": self.token,
            "friend_login": friend_login
        })

    def get_message(self):
        """Poll the server for pending messages."""
        return self.send_message_to_server({
            "command": "get_message",
            "login": self.login,
            "token": self.token
        })

    def accept_invite(self, friend_login):
        """Accept a pending friend request from *friend_login*."""
        return self.send_message_to_server({
            "command": "accept_invite",
            "login": self.login,
            "token": self.token,
            "friend_login": friend_login
        })

    def send_message_to_friend(self, friend_login, message):
        """Deliver a chat *message* to *friend_login*."""
        return self.send_message_to_server({
            "command": "send_message_to_friend",
            "login": self.login,
            "token": self.token,
            "friend_login": friend_login,
            'message': message
        })
|
import logging
import config
from aiogram import Bot, Dispatcher, executor
from aiogram.contrib.fsm_storage.redis import RedisStorage
# Log INFO and above to the root handler.
logging.basicConfig(level=logging.INFO)
# FSM state is persisted in a local Redis instance.
storage = RedisStorage(host='localhost', port=6379)
bot = Bot(token=config.API_TOKEN)
# Dispatcher routes incoming updates to handlers; shared module-wide.
dp = Dispatcher(bot, storage=storage)
|
"""A simple example of asyncio. This code is available from python 3.7.
References
- history of asyncio: https://asyncio-notes.readthedocs.io/en/latest/asyncio-history.html
- asyncio: https://docs.python.org/3/library/asyncio.html
- asyncio.queue: https://docs.python.org/3.7/library/asyncio-queue.html
- producer-consumer: https://stackoverflow.com/questions/52582685/using-asyncio-queue-for-producer-consumer-flow
"""
import asyncio
import random
async def mock_worker(queue: asyncio.Queue):
    """Endlessly pull items off *queue* and report each one."""
    print(f"start worker")
    while True:
        item = await queue.get()
        # mark the item processed so queue.join() could ever unblock
        queue.task_done()
        print(f"worker finished task {item}")
async def mock_receiver(queue: asyncio.Queue):
    """Produce a random value every two seconds and enqueue it."""
    print(f"start receiver")
    while True:
        payload = random.random()  # stand-in for real received data
        await asyncio.sleep(2)
        await queue.put(payload)
        print(f"receiver has put {payload} in Queue")
async def main():
    """Run the worker and receiver side by side until both finish."""
    queue = asyncio.Queue()
    jobs = [
        asyncio.create_task(mock_worker(queue)),
        asyncio.create_task(mock_receiver(queue)),
    ]
    # blocks until every task completes
    await asyncio.gather(*jobs)
    print("main() is done")
# execute!  (entry point; the producer/consumer loops never return)
asyncio.run(main())
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 5 11:14:59 2019
@author: Reuben
"""
from .variable import Store, Aliases
class Name_Space(dict):
    """A dict of named objects, created on demand.

    Missing keys are auto-created via ``obj_cls(name=key)``.  One entry
    may be marked "active" and retrieved through :meth:`active`.
    """

    def __init__(self, obj_cls=Store):
        self.i = 0  # counter used to generate keys when none is given
        self.obj_cls = obj_cls
        self._active = None

    def __missing__(self, key):
        # dict hook: looking up an unknown key creates the object
        return self.create(key)

    def create(self, key):
        """Instantiate, register and return a new object for *key*."""
        new = self.obj_cls(name=key)
        self[key] = new
        return new

    def get(self, key=None):
        """Return the object for *key*, creating it if needed.

        With ``key=None`` an auto-incrementing integer key is used.
        """
        if key is None:
            key = self.i
            self.i += 1
        if key not in self:
            return self.create(key)
        return self[key]

    def set_active(self, key):
        """Mark *key* as the active entry (``None`` clears it)."""
        if key in self or key is None:
            # BUG FIX: previously assigned self._active_container,
            # which active() never read.
            self._active = key
        else:
            raise KeyError(key + " not found in name space.")

    def active(self):
        """Return the currently active object."""
        return self[self._active]

    def duplicate(self, key, new_key):
        """Copy the entry at *key* into *new_key* and return the copy."""
        self[new_key] = self[key].copy()
        return self[new_key]
# Module-level registries shared by the convenience accessors below.
stores = Name_Space(Store)
aliases = Name_Space(Aliases)
def get_store(name=None):
    """Fetch (or lazily create) the store registered under *name*."""
    store = stores.get(name)
    return store
def get_aliases(name=None):
    """Fetch (or lazily create) the aliases registered under *name*."""
    alias_obj = aliases.get(name)
    return alias_obj
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, with_statement
from revolver.core import run
from revolver import contextmanager as ctx
from revolver import directory as dir
from revolver import package, file
def install():
    """Install rbenv into ``~/.rbenv`` and wire up shell autoloading."""
    package.ensure("git-core")

    if not dir.exists(".rbenv"):
        # GitHub no longer serves the unauthenticated git:// protocol,
        # so clone over HTTPS instead.
        run("git clone https://github.com/sstephenson/rbenv.git .rbenv")
    else:
        with ctx.cd(".rbenv"):
            run("git pull")

    _ensure_autoload(".bashrc")
    _ensure_autoload(".zshrc")
def ensure():
    """Install rbenv unless it is already present."""
    if dir.exists(".rbenv"):
        return
    install()
def _ensure_autoload(filename):
    """Append the rbenv PATH/init lines to the given shell rc file."""
    rc_lines = (
        'export PATH="$HOME/.rbenv/bin:$PATH"',
        'eval "$(rbenv init -)"',
    )
    for rc_line in rc_lines:
        file.append(filename, rc_line)
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides the data access object (DAO) for Organizations."""
import json
from google.cloud.security.common.data_access import dao
from google.cloud.security.common.data_access.sql_queries import select_data
from google.cloud.security.common.gcp_type import organization
from google.cloud.security.common.util import log_util
LOGGER = log_util.get_logger(__name__)
class OrganizationDao(dao.Dao):
    """Data access object (DAO) for Organizations."""

    def get_organizations(self, resource_name, timestamp):
        """Get organizations from snapshot table.

        Args:
            resource_name (str): The resource name.
            timestamp (str): The timestamp of the snapshot.

        Returns:
            list: A list of Organizations.
        """
        query = select_data.ORGANIZATIONS.format(timestamp)
        rows = self.execute_sql_with_fetch(resource_name, query, ())
        orgs = []
        for row in rows:
            org = organization.Organization(
                organization_id=row['org_id'],
                display_name=row['display_name'],
                lifecycle_state=row['lifecycle_state'])
            orgs.append(org)
        return orgs

    def get_organization(self, org_id, timestamp):
        """Get an organization from the database snapshot.

        Args:
            org_id (int): The Organization to retrieve.
            timestamp (str): The timestamp of the snapshot.

        Returns:
            Organization: An Organization from the database snapshot,
                or None when no row matched.

        Raises:
            MySQLError: If there was an error getting the organization.
        """
        query = select_data.ORGANIZATION_BY_ID.format(timestamp)
        rows = self.execute_sql_with_fetch('organization', query, (org_id,))
        if rows:
            return organization.Organization(
                organization_id=rows[0]['org_id'],
                display_name=rows[0]['display_name'],
                lifecycle_state=rows[0]['lifecycle_state'])
        return None

    def get_org_iam_policies(self, resource_name, timestamp):
        """Get the organization policies.

        This does not raise any errors if there's a database or json parse
        error because we want to return as many organizations as possible.

        Args:
            resource_name (str): The resource name.
            timestamp (str): The timestamp of the snapshot.

        Returns:
            dict: A dict that maps organizations
                (gcp_type.organization.Organization) to their
                IAM policies (dict).
        """
        org_iam_policies = {}
        query = select_data.ORG_IAM_POLICIES.format(timestamp)
        rows = self.execute_sql_with_fetch(resource_name, query, ())
        for row in rows:
            try:
                org = organization.Organization(
                    organization_id=row['org_id'])
                # BUG FIX: default to a JSON string -- json.loads({})
                # raised TypeError, which the except clause below did
                # not catch; TypeError also covers a NULL column value.
                iam_policy = json.loads(row.get('iam_policy', '{}'))
                org_iam_policies[org] = iam_policy
            except (TypeError, ValueError):
                # Logger.warn is deprecated in favor of warning
                LOGGER.warning(
                    'Error parsing json:\n %s', row.get('iam_policy'))
        return org_iam_policies
|
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
# URL routing table (old-style string view references, Django <= 1.9).
urlpatterns = patterns('',
    url(r'^$', 'app.views.index'),
    # Examples:
    url(r'^about/$', 'app.views.about'),
    url(r'^category/(?P<category_name>[-\w\d]+)/$', 'app.views.category'),
    url(r'^profile/(?P<slug>[-\w\d]+)/$', 'app.views.profile'),
    url(r'^calendar/$', 'app.views.calendar'),
    url(r'^calendar/(?P<category_name>[-\w\d]+)/$', 'app.views.events_view'),
    # url(r'^app/', include('app.foo.urls')),
    # account management lives in the institutions app
    url(r'^accounts/', include('institutions.urls')),
    url(r'^signin/$', 'institutions.views.signin'),
    url(r'^signup/$', 'institutions.views.signup'),
    url(r'^dashboard/$', 'app.views.dashboard'),
    # Uncomment the admin/doc line below to enable admin documentation:
    url(r'^user_validation/$', 'app.views.user_validation'),
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # password recovery urls
    url(r'^recover/',include('password_reset.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
#if settings.DEBUG:
#    import debug_toolbar
#    urlpatterns += patterns('',
#        url(r'^__debug__/', include(debug_toolbar.urls)),
#    )
# Dev-only: let Django itself serve uploaded media files.
if settings.DEBUG:
    # static files (images, css, javascript, etc.)
    urlpatterns += patterns('',
        (r'^media/(?P<path>.*)$', 'django.views.static.serve', {
        'document_root': settings.MEDIA_ROOT})) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from django.test import TestCase
from common.models import Injection
class InfoTest(TestCase):
    """Smoke tests for the /info/ listing page."""

    def test_info_page_renders_info_page_template(self):
        resp = self.client.get('/info/')
        self.assertTemplateUsed(resp, 'info/info.html')

    def test_info_page_displays_all_medications(self):
        created = [
            Injection.objects.create(name='Tramadol'),
            Injection.objects.create(name='Ampicillin'),
        ]
        resp = self.client.get('/info/')
        for med in created:
            self.assertIn(med, resp.context['inj'])
class InfoInjTest(TestCase):
    """Tests for the per-injection detail page."""

    def test_rx_page_url_corresponds_to_rx_slug(self):
        medication = Injection.objects.create(name='Super Tramadol (Nighttime)')
        resp = self.client.get('/info/inj/{}/'.format(medication.slug))
        self.assertEqual(200, resp.status_code)

    def test_rx_info_renders_rx_info_template(self):
        Injection.objects.create(name='Super Tramadol Nighttime')
        resp = self.client.get('/info/inj/super-tramadol-nighttime/')
        self.assertTemplateUsed(resp, 'info/rx.html')
|
import numpy as np
import pandas as pb
# Use the motion parameters to fid the bad brains
# Use the qc_csv to find the bad brains
# Motion Based outliers
def read_par_file(motion_params_file):
    """Parse a motion parameter (.par) file.

    Each data line holds six whitespace-separated values: three
    rotations followed by three translations (FSL mcflirt column
    order -- matches the original even-index double-space parsing).

    Returns:
        (trans_x, trans_y, trans_z, rot_x, rot_y, rot_z, volumes_count)
        where each list has one float per volume.
    """
    trans_x, trans_y, trans_z = [], [], []
    rot_x, rot_y, rot_z = [], [], []
    volumes_count = 0
    with open(motion_params_file) as f:
        for line in f:
            # split() tolerates any run of whitespace, unlike the old
            # fixed double-space index scheme (0,2,4,...)
            values = [float(v) for v in line.split()]
            if not values:
                continue  # skip blank lines
            rot_x.append(values[0])
            rot_y.append(values[1])
            rot_z.append(values[2])
            trans_x.append(values[3])
            trans_y.append(values[4])
            trans_z.append(values[5])
            volumes_count += 1
    return trans_x, trans_y, trans_z, rot_x, rot_y, rot_z, volumes_count
def motion_outliers(motion_params_file, motion_threshold,
                    outliers_percentage_threshold=30):
    """Flag subjects whose motion exceeds *motion_threshold* too often.

    Args:
        motion_params_file: path to a .npy file holding the list of
            per-subject .par file paths.
        motion_threshold: absolute motion value counted as an outlier.
        outliers_percentage_threshold: percent of outlier volumes above
            which a subject is flagged.

    Returns:
        List of flagged subject IDs.
    """
    # BUG FIX: previously read the module-level ``motion_params_npy``
    # global instead of the ``motion_params_file`` argument.
    motion_params_paths = np.load(motion_params_file)
    outliers_sub_ids = []
    for subject_param_path in motion_params_paths:
        # path layout assumed: .../sub-<ID>_*.par -- TODO confirm
        sub_id = subject_param_path.split('/')[-1].split('_')[0].split('-')[1]
        trans_x, trans_y, trans_z, rot_x, rot_y, rot_z, volumes_count = \
            read_par_file(subject_param_path)
        params = np.array([trans_x, trans_y, trans_z, rot_x, rot_y, rot_z]).T
        # rows (volumes) with any parameter beyond the threshold
        outlier_idx = np.where(np.abs(params) > motion_threshold)
        num_outlier_entries = len(set(outlier_idx[0]))
        percentage_outlier_entries = num_outlier_entries * 100 / volumes_count
        if percentage_outlier_entries > outliers_percentage_threshold:
            print('Subject %s with %s Outliers has %s percent outliers'%(sub_id,\
            num_outlier_entries,percentage_outlier_entries ))
            outliers_sub_ids.append(sub_id)
    return outliers_sub_ids
def csf_wm_outliers(qc_csv_file, csf_thresh, wm_thresh):
    """Flag subjects whose CSF or WM segmentation quality is too low.

    Args:
        qc_csv_file: CSV with columns sub_id, quality_csf, quality_wm.
        csf_thresh: minimum acceptable CSF quality.
        wm_thresh: minimum acceptable WM quality.

    Returns:
        List of flagged subject IDs (each listed at most once).
    """
    outliers_sub_id = []
    wm_csf_qc_df = pb.read_csv(qc_csv_file)
    wm_csf_qc_mat = wm_csf_qc_df[['sub_id','quality_csf','quality_wm']].values
    for row in wm_csf_qc_mat:
        # BUG FIX: the original indexed the whole matrix instead of the
        # current row, and tested the CSF column for the WM check too.
        sub_id, quality_csf, quality_wm = row[0], row[1], row[2]
        if quality_csf < csf_thresh:
            print('Subject %s has a very poor CSF quality of %s'%(sub_id, quality_csf))
            outliers_sub_id.append(sub_id)
        if quality_wm < wm_thresh:
            print('Subject %s has a very poor WM quality of %s'%(sub_id, quality_wm))
            if sub_id not in outliers_sub_id:
                outliers_sub_id.append(sub_id)
    return outliers_sub_id
if __name__ == "__main__":
motion_params_npy = '/mnt/project2/home/varunk/fMRI/results/resultsABIDE1_2/'+\
'Preprocess_Datasink/motion_params_paths/motion_params_file_list.npy'
qc_csv_file = '/mnt/project2/home/varunk/fMRI/results/resultsABIDE1_2/'+\
'Preprocess_Datasink/qc_csv/qc.csv'
motion_thresh = 2.5
outliers_percentage_threshold = 30
outliers_sub_ids = motion_outliers(motion_params_npy, motion_thresh, \
outliers_percentage_threshold)
print(outliers_sub_ids)
# print(num_outlier_entries)
# csf_thresh = 0.015
# wm_thresh =
|
# Vorgestellter Ablauf:
# Eingabe von Datum eingeteilt in Tag und Monat
# Ausgabe des jeweiligen Sternzeichens anhand Zuordnung durch das Tool
def herausfinden(month=None, day=None):
    """Print the zodiac sign (Sternzeichen) for the given date.

    Falls back to the module-level ``month``/``day`` read from user
    input when called without arguments, so the original call style
    ``herausfinden()`` keeps working.

    Fixes the old boundary gaps/overlaps (e.g. 20 January printed
    nothing, and 21-28 February printed "Wassermann").
    """
    if month is None or day is None:
        g = globals()
        month = g['month'] if month is None else month
        day = g['day'] if day is None else day

    # Sign that *begins* in each month (1-12) and the day it starts on.
    start_days = [20, 19, 21, 20, 21, 21, 23, 23, 23, 23, 22, 22]
    start_signs = ["Wassermann", "Fische", "Widder", "Stier", "Zwillinge",
                   "Krebs", "Löwe", "Jungfrau", "Waage", "Skorpion",
                   "Schütze", "Steinbock"]
    if day >= start_days[month - 1]:
        print(start_signs[month - 1])
    else:
        # before the cutoff we are still in the previous month's sign
        print(start_signs[month - 2])
months = {1 : "Januar", 2 : "Februar", 3 : "März", 4 : "April", 5 : "Mai", 6 : "Juni", 7 : "Juli", 8 : "August", 9 : "September", 10 : "Oktober", 11 : "November", 12 : "Dezember"}
print("Lass uns Dein Sternzeichen gemeinsam herausfinden!")
month = int(input("Bitte gib Deinen Geburtsmonat in Ziffern ein (Also 1 für Januar usw.)"))
day = int(input("Bitte gib nun Deinen Geburtstag ein: "))
print("Dein Gebutstag ist also der: ")
print(str(day) + ".")
print(months[month])
print("Dein Sternzeichen ist demnach: ")
herausfinden() |
import unittest, os

# Discover every *.py test module under ./src relative to this file.
SRC_PATH = os.path.join(os.path.dirname(__file__), 'src')
TEST_CASES = unittest.defaultTestLoader.discover(SRC_PATH, '*.py')

# Aggregate everything into one suite so a single runner invocation
# executes the whole tree.
suite = unittest.TestSuite()
suite.addTest(TEST_CASES)

if __name__ == '__main__':
    unittest.TextTestRunner().run(suite)
|
# Generated by Django 3.0.4 on 2020-04-23 06:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: set verbose names (and lengths) on the
    department contact fields.  Do not edit by hand."""

    dependencies = [
        ('apps', '0017_department_sequence'),
    ]

    operations = [
        migrations.AlterField(
            model_name='department',
            name='address',
            field=models.CharField(max_length=100, verbose_name='Адрес'),
        ),
        migrations.AlterField(
            model_name='department',
            name='email',
            field=models.CharField(max_length=20, verbose_name='E-mail'),
        ),
        migrations.AlterField(
            model_name='department',
            name='fax',
            field=models.CharField(max_length=20, verbose_name='Факс'),
        ),
        migrations.AlterField(
            model_name='department',
            name='name',
            field=models.CharField(max_length=30, verbose_name='Название'),
        ),
        migrations.AlterField(
            model_name='department',
            name='tel',
            field=models.CharField(max_length=20, verbose_name='Телефон'),
        ),
    ]
|
#!/usr/bin/env python3
import matplotlib.pyplot as plt
from functions_script import vector_mean as mean
def b_1(x, y):
    """Slope of the least-squares regression line through (x, y)."""
    mx, my = mean(x), mean(y)
    numerator = sum((xi - mx) * (yi - my) for xi, yi in zip(x, y))
    denominator = sum((xi - mx) ** 2 for xi in x)
    return numerator / denominator
def b_0(x, y, b1):
    """Intercept of the regression line given slope *b1*."""
    count = len(x)
    return (sum(y) - b1 * sum(x)) / count
def get_b0_b1(x, y):
    """Return (intercept, slope) of the least-squares fit to (x, y)."""
    slope = b_1(x, y)
    intercept = b_0(x, y, slope)
    return intercept, slope
def main():
    """Fit a simple regression to fixed sample data and print the fit."""
    heights = (65, 65, 62, 67, 69, 65, 61, 67)
    weights = (105, 125, 110, 120, 140, 135, 95, 130)
    # (intercept, slope) tuple printed exactly as before
    fit = get_b0_b1(heights, weights)
    print(fit)
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
|
import unittest
from app.code.bank.account import Account
class AccountTest(unittest.TestCase):
    """Unit tests for the bank Account model."""

    def test_create_valid_account(self):
        account = Account("001", 50)
        self.assertEqual(account.account_number, "001")
        self.assertEqual(account.balance, 50)

    def test_create_account(self):
        # Previously this test created the account but asserted nothing,
        # so it could never fail; verify the zero-balance case.
        account = Account('001', 0)
        self.assertEqual(account.account_number, '001')
        self.assertEqual(account.balance, 0)
|
import math
def calcFuel(mass):
    """Fuel required for *mass*: floor(mass / 3) - 2.

    Parameter renamed from ``input`` to avoid shadowing the builtin;
    integer floor division replaces math.floor for integer masses.
    """
    return mass // 3 - 2
if __name__ == "__main__":
file = open("input.txt")
fuel_sum = 0
for line in file.readlines():
mass = int(line)
fuel = calcFuel(mass)
while fuel > 0:
fuel_sum += fuel
fuel = calcFuel(fuel)
print(fuel_sum)
|
# -*- coding: utf-8 -*-
"""
libs.encoding_utils
~~~~~~~~~~~~~~
force encoding of a file
:copyright: (c) 2012 by arruda.
"""
import os
import shutil
def convert_to_utf8(filename):
    """Re-encode *filename* to UTF-8 in place, keeping a .bak backup.

    Python 2 only: relies on ``str.decode`` and py2 except syntax.
    NOTE(review): ``open(filename, 'r').read()`` leaves the handle to
    the garbage collector; the whole file is held in memory.
    """
    # gather the encodings you think that the file may be
    # encoded inside a tuple
    encodings = ('ascii','iso-8859-1', 'iso-8859-7', 'windows-1253','macgreek')
    f = open(filename, 'r').read()
    # now start iterating in our encodings tuple and try to
    # decode the file
    for enc in encodings:
        try:
            # try to decode the file with the first encoding
            # from the tuple.
            # if it succeeds then it will reach break, so we
            # will be out of the loop (something we want on
            # success).
            # the data variable will hold our decoded text
            data = f.decode(enc)
            print "encoded with:%s" % enc
            break
        except Exception, e:
            # if the first encoding fail, then with the continue
            # keyword will start again with the second encoding
            # from the tuple an so on.... until it succeeds.
            # if for some reason it reaches the last encoding of
            # our tuple without success, then exit the program.
            if enc == encodings[-1]:
                raise e
            continue
    # now get the absolute path of our filename and append .bak
    # to the end of it (for our backup file)
    fpath = os.path.abspath(filename)
    newfilename = fpath + '.bak'
    # and make our backup file with shutil
    shutil.copy(filename, newfilename)
    # and at last convert it to utf-8
    f = open(filename, 'w')
    try:
        f.write(data.encode('utf-8'))
    except Exception, e:
        raise e
    finally:
        f.close()
|
# Demo of str.format presentation types; variables renamed so the
# builtins ``float`` and ``str``-like names are not shadowed.
integer = 11
float_number = 34.141
text = "Texto"
# decimal representation
print("Mi texto {:d}".format(integer))
# binary representation
print("Mi texto {:b}".format(integer))
# hexadecimal representation (was {:b}, which printed binary again)
print("Mi texto {:x}".format(integer))
# floating-point representation
print("Mi texto {:f}".format(integer))
import argparse
import pandas as pd
from owlready2 import *
from knowledge_graph.network_class import Network
def get_edges(ontology, source):
    """Collect the labeled edges reachable from *source* via DFS."""
    network = Network(ontology, source)
    network.dfs_labeled_edges()
    return network.get_results()
def test_answer():
    # NOTE(review): ``search_node`` and ``onto_path`` are not defined in
    # this module -- this test raises NameError as written; confirm the
    # intended helper and path before relying on it.
    assert search_node(get_ontology(onto_path).load()) == []
    #need to add in the answer to this unit test.
    #still need to make work for when:
    #multiple levels of parents
    #mutliple parents
    #solutions
    #reference(?)
def give_alias(property_object):
    """Derive a python-safe alias from the property's first label."""
    alias = property_object.label[0]
    # same replacement order as before: '/', then spaces, then ':'
    for old, new in (("/", "_or_"), (" ", "_"), (":", "_")):
        alias = alias.replace(old, new)
    property_object.python_name = alias
#TODO: remove this code and only have it be in the network_class.py code ? Currently, breaks endpoints though if do this.
def main(args):
    """Write all edges reachable from a reference node to a csv file.

    input: args = parsed CLI arguments
        (source, refOntologyPath, outputPath)
    output: saves a csv file of (subject, object, predicate) triples
    example: python3 make_network.py "coal mining" "./climate_mind_ontology20200721.owl" "output.csv"
    """
    ontology_file = args.refOntologyPath
    csv_path = args.outputPath
    start_node = args.source

    # Load the OWL2 ontology and walk every path leaving the source.
    ontology = get_ontology(ontology_file).load()
    edge_triples = get_edges(ontology, start_node)

    # Persist for downstream API / front-end consumption.
    frame = pd.DataFrame([[t[0], t[1], t[2]] for t in edge_triples],
                         columns=['subject', 'object', 'predicate'])
    frame = frame.drop_duplicates()  # Remove if we fix network_class
    frame.to_csv(csv_path, index=False)
if __name__=="__main__":
parser = argparse.ArgumentParser(description='get ontology edge from reference node')
parser.add_argument("-source", type=str,
help='the node you want to start from in the ontology, if None, it will use entire ontology')
parser.add_argument("refOntologyPath", type=str,
help='path to reference OWL2 ontology')
parser.add_argument("outputPath", type=str,
help='path for output csv file of result edges (list of object,subject,predicate triples)')
args = parser.parse_args()
main(args)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# collection of funcions related to the marshall gci scripts.
#
import requests, json, os, time
import numpy as np
import concurrent.futures
from astropy.time import Time
import astropy.units as u
import logging
logging.basicConfig(level = logging.INFO)
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
# Base URL of the GROWTH marshal CGI endpoints.
MARSHALL_BASE = 'http://skipper.caltech.edu:8080/cgi-bin/growth/'
# CGI scripts growthcgi() is allowed to call.
MARSHALL_SCRIPTS = (
    'list_programs.cgi',
    'list_candidates.cgi',
    'list_program_sources.cgi',
    'source_summary.cgi',
    'print_lc.cgi',
    'ingest_avro_id.cgi',
    'save_cand_growth.cgi',
    'edit_comment.cgi',
    'update_archived_phot.cgi'
    )
# Human-readable messages for the HTTP status codes we expect.
httpErrors = {
    304: 'Error 304: Not Modified: There was no new data to return.',
    400: 'Error 400: Bad Request: The request was invalid. An accompanying error message will explain why.',
    422: 'Error 422: Invalid Input.',
    403: 'Error 403: Forbidden: The request is understood, but it has been refused. An accompanying error message will explain why',
    404: 'Error 404: Not Found: The URI requested is invalid or the resource requested, such as a category, does not exists.',
    500: 'Error 500: Internal Server Error: Something is broken.',
    503: 'Error 503: Service Unavailable.'
}
# Science-program name -> marshal program ID (used for queries).
SCIENCEPROGRAM_IDS = {
    'AMPEL Test'                                : 42,
    'Cosmology'                                 : 32,
    'Gravitational Lenses'                      : 43,
    'Correlating Icecube and ZTF'               : 44,
    'Electromagnetic Counterparts to Neutrinos' : 25,
    'Test Filter'                               : 37,
    'Redshift Completeness Factor'              : 24,
    'Weizmann Test Filter'                      : 45,
    'ZTFBH Offnucear'                           : 47,
    'ZTFBH Nuclear'                             : 48,
    'Nuclear Transients'                        : 10,
    #'AmpelRapid'                               : 67
}
# Program name -> ID used by the ingestion endpoint (differs from the
# science-program IDs above).
INGEST_PROGRAM_IDS = {   # TODO add all of them
    'AMPEL Test'    : 4,
    'Cosmology'     : 5
}
def requests_retry_session(retries=3, backoff_factor=0.3, status_forcelist=(500, 502, 504), session=None):
    """
    create robust request session. From:
    https://www.peterbe.com/plog/best-practice-with-retries-with-requests
    """
    sess = session or requests.Session()
    retry_policy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    http_adapter = HTTPAdapter(max_retries=retry_policy)
    # retry both plain and TLS endpoints with the same policy
    for prefix in ('http://', 'https://'):
        sess.mount(prefix, http_adapter)
    return sess
def growthcgi(scriptname, to_json=True, logger=None, max_attemps=2, **request_kwargs):
    """
    Run one of the growth cgi scripts, check results and return.

    Returns the parsed JSON (or raw text when ``to_json`` is False), the
    HTTP status code when the body is not valid JSON, or ``None`` when
    every attempt failed.
    """
    # get the logger
    logger = logger if not logger is None else logging.getLogger(__name__)

    # check
    if not scriptname in MARSHALL_SCRIPTS:
        raise ValueError("scriptname %s not recognized. Available options are: %s"%
            (scriptname, ", ".join(MARSHALL_SCRIPTS)))
    path = os.path.join(MARSHALL_BASE, scriptname)

    # post request to the marshall making several attemps
    n_try, success = 0, False
    while n_try<max_attemps:
        logger.debug('Starting %s post. Attempt # %d'%(scriptname, n_try))

        # set timeout from kwargs or use default
        # NOTE(review): at n_try=0 this yields base-1 seconds (e.g. 59);
        # was 60*(n_try) intended? confirm the grace-period formula.
        timeout = request_kwargs.pop('timeout', 60) + (60*n_try-1)
        r = requests_retry_session().post(path, timeout=timeout, **request_kwargs)
        logger.debug('request URL: %s?%s'%(r.url, r.request.body))
        status = r.status_code
        if status != 200:
            try:
                message = httpErrors[status]
            except KeyError as e:
                message = 'Error %d: Undocumented error'%status
            logger.error(message)
        else:
            logger.debug("Successful growth connection.")
            success = True
            break
        n_try+=1

    if not success:
        logger.error("Failure despite %d attemps!"%max_attemps)
        return None

    # parse result to JSON
    if to_json:
        try:
            rinfo =  json.loads(r.text)
        except ValueError as e:
            # No json information returned, usually the status most relevant
            logger.error('No json returned: status %d' % status )
            rinfo = status
    else:
        rinfo = r.text
    return rinfo
def get_saved_sources(program_id, trange=None, auth=None, logger=None, **request_kwargs):
    """
    Fetch the sources saved for a marshal program via list_program_sources.cgi,
    optionally restricted to an ingestion time range.

    Parameters:
    -----------
    program_id: `int`
        marshal program ID.
    trange: `list` or `tuple` or None
        (start_date, end_date) time constraint; entries may be strings or
        astropy.time.Time objects. When None, everything is fetched at once.
    auth: `list`
        (username, pwd)

    Returns:
    --------
    list with the saved sources for the program.
    """
    if logger is None:
        logger = logging.getLogger(__name__)
    # base request payload; redshift and classification columns are requested
    payload = {
        'programidx': program_id,
        'getredshift': 1,
        'getclassification': 1
    }
    if trange is None:
        logger.debug("listing saved sources of scienceprogram ID %d"%program_id)
    else:
        # normalize both bounds to marshal-formatted time strings
        t0, t1 = trange
        if type(t0) is str:
            t0 = Time(t0)
        if type(t1) is str:
            t1 = Time(t1)
        t0 = t0.datetime.strftime("%Y-%m-%d %H:%M:%S")
        t1 = t1.datetime.strftime("%Y-%m-%d %H:%M:%S")
        logger.debug("listing saved sources of scienceprogram ID %d for ingested times between %s and %s"%
            (program_id, t0, t1))
        payload['startdate'] = t0
        payload['enddate'] = t1
    # execute request
    srcs = growthcgi(
        'list_program_sources.cgi',
        logger=logger,
        auth=auth,
        data=payload,
        **request_kwargs
    )
    logger.debug("retrieved %d sources"%len(srcs))
    return srcs
def query_scanning_page(
        start_date, end_date, program_name, showsaved="selected", auth=None,
        logger=None, program_id=None, **request_kwargs):
    """
    Return the sources on the scanning page of `program_name` that were
    ingested into the marshal between start_date and end_date.
    """
    # TODO: the ID used to query the scanning page seems to be different from
    # the one you get from lits programs.
    if logger is None:
        logger = logging.getLogger(__name__)
    # resolve the scienceprogram number: explicit program_id wins, else look
    # the program name up in the static table
    scienceprogram = program_id
    if scienceprogram is None:
        scienceprogram = SCIENCEPROGRAM_IDS.get(program_name)
    if scienceprogram is None:
        raise KeyError("cannot find scienceprogram number corresponding to program %s. We have: %s"%
            (program_name, repr(SCIENCEPROGRAM_IDS)))
    # normalize both bounds to marshal-formatted time strings
    bounds = []
    for bound in (start_date, end_date):
        t = Time(bound) if type(bound) is str else bound
        bounds.append(t.datetime.strftime("%Y-%m-%d %H:%M:%S"))
    tstart, tend = bounds
    logger.debug("querying scanning page of program %s (scienceprogram %d) for ingested times between %s and %s"%
        (program_name, scienceprogram, tstart, tend))
    # query and return sources as json
    srcs = growthcgi(
        'list_candidates.cgi',
        logger=logger,
        auth=auth,
        data={
            'scienceprogram' : scienceprogram,
            'startdate'      : tstart,
            'enddate'        : tend,
            'showSaved'      : showsaved
        },
        **request_kwargs
    )
    logger.debug("retrieved %d sources"%len(srcs))
    return srcs
def query_marshal_timeslice(query_func, trange=None, tstep=5*u.day, nworkers=12, max_attemps=2, raise_on_fail=False, logger=None):
    """
    Splice up a marshal query in time so that each request is manageable.
    Issue the query function in time slices each of tstep days (optionally
    limiting the global time range) and glue the results together.
    The queries are executed using a thread pool.

    Parameters:
    -----------
    query_func: `callable`
        query function to be run over the time slices. It must have the signature
        query_func(tstart, tstop) and must return a list.
    trange: `list` or `tuple` or None
        time constraints for the query in the form of (start_date, end_date). The
        two elements of this list can be either strings or astropy.time.Time objects.
        if None, all the sources in the scanning page are retrieved slicing the
        query in smaller time steps. Since the marshal returns at max 200 candidates
        per query, if this limit is reached, the time range of the query is
        subdivided iteratively.
    tstep: `astropy.quantity`
        time step to use to splice the query.
    nworkers: `int`
        number of threads in the pool that are used to download the stuff.
    max_attemps: `int`
        this function will re-iterate the download on the jobs that fail until
        complete success or until the maximum number of attemps is reached.
    raise_on_fail: `bool`
        if after the max_attemps is reached, there are still failed jobs, the
        function will raise an exception if raise_on_fail is True, else it
        will simply throw a warning.

    Returns:
    --------
    list with the glued results from all the time-slice queries
    """
    # get the logger
    logger = logger if not logger is None else logging.getLogger(__name__)
    # parse time limits; default window starts at the marshal epoch below and
    # extends one day into the future
    if not trange is None:
        start_date = trange[0]
        end_date = trange[1]
    else:
        start_date = "2018-03-01 00:00:00"
        end_date = (Time.now() +1*u.day).datetime.strftime("%Y-%m-%d %H:%M:%S")
    # subdivide the query in time steps
    start, end = Time(start_date), Time(end_date)
    times = np.arange(start, end, tstep).tolist()
    times.append(end)
    logger.info("Querying marshal with %s between %s and %s using dt: %.2f h"%
        (query_func.__name__, start_date, end_date, tstep.to('hour').value))
    # create list of time bounds (consecutive [lo, hi] pairs)
    tlims = [ [times[it-1], times[it]] for it in range(1, len(times))]
    # utility functions for multiprocessing
    def query_func_wrap(tlim): return query_func(tlim[0], tlim[1])
    def threaded_downloads(todo_tlims, candidates):
        """
        download the sources for specified tlims and keep track of what you've done
        (successful slices are removed from todo_tlims in place; results are
        appended to `candidates` in place)
        """
        n_total, n_failed = len(todo_tlims), 0
        with concurrent.futures.ThreadPoolExecutor(max_workers = nworkers) as executor:
            jobs = {
                executor.submit(query_func_wrap, tlim): tlim for tlim in todo_tlims}
            # inspect completed jobs
            for job in concurrent.futures.as_completed(jobs):
                tlim = jobs[job]
                # inspect job result
                try:
                    # collect all the results
                    candids = job.result()
                    candidates += candids
                    logger.debug("Query from %s to %s returned %d candidates. Total: %d"%
                        (tlim[0].iso, tlim[1].iso, len(candids), len(candidates)))
                    # if job is successful, remove the tlim from the todo list
                    todo_tlims.remove(tlim)
                except Exception as e:
                    logger.error("Query from %s to %s generated an exception %s"%
                        (tlim[0].iso, tlim[1].iso, repr(e)))
                    n_failed+=1
        # print some info
        logger.debug("jobs are done: total %d, failed: %d"%(n_total, n_failed))
    # loop through the list of time limits and spread across multiple threads,
    # retrying the failed slices until success or max_attemps is exhausted
    start = time.time()
    candidates = [] # here you collect sources you've successfully downloaded
    n_try, todo_tlims = 0, tlims # here you keep track of what is done and what is still to be done
    while len(todo_tlims)>0 and n_try<max_attemps:
        logger.debug("Querying the marshal. Iteration number %d: %d jobs to do"%
            (n_try, len(todo_tlims)))
        threaded_downloads(todo_tlims, candidates)
        n_try+=1
    end = time.time()
    # notify if it's still not enough
    if len(todo_tlims)>0:
        mssg = "Query for the following time interavals failed:\n"
        for tl in todo_tlims: mssg += "%s %s\n"%(tl[0].iso, tl[1].iso)
        if raise_on_fail:
            raise RuntimeError(mssg)
        else:
            logger.error(mssg)
    # check for duplicates
    logger.info("Fetched %d candidates/sources in %.2e sec"%(len(candidates), (end-start)))
    return candidates
def ingest_candidates(
    avro_ids, program_name, program_id, query_program_id, be_anal,
    max_attempts=3, auth=None, logger=None, **request_kwargs):
    """
    Ingest one or more candidate(s) by avro id into the marshal.
    If needed we can be anal about it and go and verify the ingestion
    by re-querying the scanning page. `avro_ids` can be a single id or
    a list of them.

    Parameters:
    -----------
    avro_ids: `str`, `int`, or list thereof
        avro packet id(s) of the candidate(s) to ingest.
    program_name: `str`
        marshal program name (used for the verification query and logging).
    program_id: `int`
        program id passed to ingest_avro_id.cgi.
    query_program_id: `int`
        program id passed to the scanning-page query used for verification.
    be_anal: `bool`
        if True, verify the ingestion and retry failed ids (up to
        max_attempts); if False, return None right after the first pass.

    Returns:
    --------
    list of avro ids that still failed after all attempts (empty on success),
    or None when be_anal is False.
    """
    # remember the time to be able to go veryfy downloaded candidates
    start_ingestion = Time.now() - 24*u.hour #TODO: restrict once you are certain it works
    # get the logger
    logger = logger if not logger is None else logging.getLogger(__name__)
    # # get the program id used by the ingest page
    # ingest_pid = INGEST_PROGRAM_IDS.get(program_name)
    # if ingest_pid is None:
    #     raise KeyError("cannot find program %s in SCIENCEPROGRAM_IDS. Availables are: %s"
    #         %", ".join(SCIENCEPROGRAM_IDS.keys()))
    # apparently the ingestion prefers to use the 'user specific' program id
    # rather than the other ones. TODO: figure out if this is a consistent behaviour
    ingest_pid = program_id
    # see if you want to ingest just one candidates or a whole bunch of them
    # cast everything to string for consistency and checking
    if type(avro_ids) in [str, int]:
        to_ingest = [str(avro_ids)]
    else:
        to_ingest = [str(aid) for aid in avro_ids]
    logger.info("Trying to ingest %d candidate(s) to to marhsal program %s using ingest ID %d"%
        (len(to_ingest), program_name, ingest_pid))
    # If there is nothing to ingest, we are done with no failures :)
    if len(to_ingest) == 0 :
        failed = []
        return failed
    # ingest all the candidates, eventually veryfying and retrying
    n_attempts, failed = 0, []
    while len(to_ingest)>0 and n_attempts < max_attempts:
        n_attempts+=1
        logger.debug("attempt number %d of %d."%(n_attempts, max_attempts))
        # ingest them
        for avro_id in to_ingest:
            status = growthcgi(
                'ingest_avro_id.cgi',
                logger=logger,
                auth=auth,
                to_json=False,
                data={'avroid': avro_id, 'programidx': str(ingest_pid)},
                **request_kwargs
            )
            logger.debug("Ingesting candidate %s returned %s"%(avro_id, status))
        logger.info("Attempt %d: done ingesting candidates."%n_attempts)
        # if you take life easy then it's your problem. We'll exit the loop
        if not be_anal:
            return None
        # if you want to be anal about that, go and make sure all the candidates are there
        end_ingestion = Time.now() + 10*u.min
        logger.info("veryfying ingestion looking at candidates ingested between %s and %s"%
            (start_ingestion.iso, end_ingestion.iso))
        done, failed = [], []   # here overwite global one
        try:
            new_candidates = query_scanning_page(
                start_date=start_ingestion.iso,
                end_date=end_ingestion.iso,
                program_name=program_name,
                showsaved="selected",
                program_id=query_program_id,
                auth=auth,
                logger=logger,
                **request_kwargs)
            # if you got none it could mean you haven't ingested them.
            # but could also be just a question of time. Death is the only certainty
            if len(new_candidates) == 0:
                logger.warning("attempt # %d. No new candidates, upload seems to have failed."%n_attempts)
                failed = to_ingest
                continue
            # see if the avro_id is there (NOTE: assume that the 'candid' in the sources stores the 'avro_id')
            ingested_ids = [str(dd['candid']) for dd in new_candidates]
            for avro_id in to_ingest:
                if avro_id in ingested_ids:
                    done.append(avro_id)
                else:
                    failed.append(avro_id)
            logger.info("attempt # %d. Of the desired candidates %d successfully ingested, %d failed"%
                (n_attempts, len(done), len(failed)))
            # remember what is still to be done
            to_ingest = failed
        except Exception as e:
            logger.warning("could not query candidate page. Got exception %s"%e)
    # return the list of ids that failed consistently after all the attempts
    return failed
|
# APP CONFIGS
# NOTE(review): the trailing commas make these assignments bind one-element
# tuples, e.g. SECRET_KEY == ('develop',). This looks like a fragment copied
# out of a dict/call — confirm whether plain string values were intended.
SECRET_KEY='develop',
CACHE_TYPE='null',
|
import argparse
import my_utils
import pyproj
from map_html import TOP as MAP_TOP
from map_html import BOTTOM as MAP_BOTTOM
from map_html import CIRCLE_RED_20, CIRCLE_RED_50
class Runner(object):
def __init__(self, args):
# self.user_id = args.user_id
# self.start_time = args.start_time
# self.stop_time = args.stop_time
# self.shed = args.shed
self._bandwidth = args.bandwidth
self._myproj = pyproj.Proj("+init=EPSG:32613")
self._transit = my_utils.TransitData()
self._transit.load_stops()
self._bus_stops = self._transit.get_data()
print "Got bandwidth", self._bandwidth
def plot_stops(self):
print "Plotting %d points..." % len(self._bus_stops)
map_name = "./data/maps/bus_stops.html"
f = open(map_name, "w")
f.write(MAP_TOP)
f.write("var circle = {\n")
i = 1
for point in self._bus_stops.itervalues():
lon = point.get('lon')
lat = point.get('lat')
f.write("%d: {center:{lat: %f, lng: %f},},\n" % (i, lat, lon))
i+= 1
f.write("};\n")
f.write(CIRCLE_RED_50)
f.write(MAP_BOTTOM)
f.close()
print "Wrote file: %s" % map_name
def plot_grid_utm(self):
steps_x = 100
steps_y = 50
min_point, max_point = self._transit.get_bounding_box_utm()
print min_point
print max_point
border = 500.0
min_x = min_point[0] - border
max_x = max_point[0] + border
min_y = min_point[1] - border
max_y = max_point[1] + border
step_size_x = (max_x - min_x) / steps_x
step_size_y = (max_y - min_y) / steps_y
map_name = "./data/maps/access_grid.html"
f = open(map_name, "w")
f.write(MAP_TOP)
f.write("var circle = {\n")
i = 0
for x in xrange(steps_x + 1):
for y in xrange(steps_y + 1):
utm_x = min_x + x * step_size_x
utm_y = min_y + y * step_size_y
lon, lat = self._myproj(utm_x, utm_y, inverse=True)
f.write("%d: {center:{lat: %f, lng: %f},},\n" % (i, lat, lon))
i+= 1
f.write("};\n")
f.write(CIRCLE_RED_50)
f.write(MAP_BOTTOM)
f.close()
print "Wrote file: %s" % map_name
def plot_grid(self):
steps_x = 100
steps_y = 50
min_point, max_point = self._transit.get_bounding_box()
print min_point
print max_point
border = 0.0
min_x = min_point[0] - border
max_x = max_point[0] + border
min_y = min_point[1] - border
max_y = max_point[1] + border
step_size_x = (max_x - min_x) / steps_x
step_size_y = (max_y - min_y) / steps_y
map_name = "./data/maps/access_grid.html"
f = open(map_name, "w")
f.write(MAP_TOP)
f.write("var circle = {\n")
i = 0
for x in xrange(steps_x + 1):
for y in xrange(steps_y + 1):
lat = min_x + x * step_size_x
lon = min_y + y * step_size_y
# lon, lat = self._myproj(utm_x, utm_y, inverse=True)
f.write("%d: {center:{lat: %f, lng: %f},},\n" % (i, lat, lon))
i+= 1
f.write("};\n")
f.write(CIRCLE_RED_50)
f.write(MAP_BOTTOM)
f.close()
print "Wrote file: %s" % map_name
def test_bounding_box(self):
min_l, max_l = self._transit.get_bounding_box()
print min_l, max_l
min_lat = min_l[0]
min_lon = min_l[1]
max_lat = max_l[0]
max_lon = max_l[0]
print self._myproj(min_lon, min_lat)
print self._myproj(max_lon, max_lat)
print self._myproj(min_lon, max_lat)
print self._myproj(max_lon, min_lat)
if __name__ == "__main__":
    # parse the KDE bandwidth from the command line and plot the lat/lon grid
    parser = argparse.ArgumentParser(description='Accessibility Heatmap')
    parser.add_argument("--bandwidth", help="KDE Bandwidth", type=float, required=True)
    # parser.add_argument("--start_time", help="Start Time", type=str, required=True)
    # parser.add_argument("--stop_time", help="Stop Time", type=str, required=True)
    # parser.add_argument("--shed", help="SHED", type=int, required=True)
    args = parser.parse_args()
    runner = Runner(args)
    # runner.plot_stops()
    runner.plot_grid()
    # runner.plot_grid_utm()
    #runner.test_bounding_box()
"""
5. Faça um Programa que converta metros para centímetros.
5.1. 1 metro e igual a 100cm
5.2. cm = metro / 0.01
"""
def metros(x):
    """Convert a length in metres to a centimetre string, e.g. 1 -> '100cm'."""
    em_centimetros = x / 0.01
    return '{}cm'.format(int(em_centimetros))
if __name__ == '__main__':
    # sanity checks: whole metres map to their centimetre strings
    for m in (1, 2, 3):
        assert metros(m) == str(m * 100) + 'cm'
|
# Python
# Keep the numbers divisible by 3 from 1..9.
# NOTE(review): on Python 3 `filter` returns a lazy iterator, not a list;
# wrap in `list(...)` if a materialized sequence is expected.
multiples_of_3 = filter(lambda x: x % 3 == 0, \
                        [1, 2, 3, 4, 5, 6, 7, 8, 9])
|
# Copyright 2022 NVIDIA Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numpy as np
import pytest
import cunumeric as num
from legate.core import LEGATE_MAX_DIM
def _check(a, routine, sizes):
    """Run `routine` from both numpy and cunumeric on the arrays in `a` and
    assert element-wise equality of the two results.

    Parameters
    ----------
    a : list of np.ndarray
        positional arguments for the routine.
    routine : str
        name of the routine (e.g. "atleast_1d") looked up on both modules.
    sizes : tuple or list
        shape info, used only for the assertion/print message.
    """
    b = getattr(np, routine)(*a)
    c = getattr(num, routine)(*a)
    is_equal = True
    # default pair reported on failure (the duplicate reassignment in the
    # length-mismatch branch was removed — it set the same value again)
    err_arr = [b, c]
    if len(b) != len(c):
        is_equal = False
    else:
        for each in zip(b, c):
            if not np.array_equal(*each):
                err_arr = each
                is_equal = False
                break
    print_msg = f"np.{routine}({sizes})"
    assert is_equal, (
        f"Failed, {print_msg}\n"
        f"numpy result: {err_arr[0]}\n"
        f"cunumeric_result: {err_arr[1]}\n"
        f"cunumeric and numpy shows different result\n"
    )
    print(f"Passed, {print_msg}, np: {b}, cunumeric: {c}")
# default extent per dimension for the generated test shapes
DIM = 10
# one shape per rank, from scalar () up to LEGATE_MAX_DIM dimensions
SIZE_CASES = list((DIM,) * ndim for ndim in range(LEGATE_MAX_DIM + 1))
SIZE_CASES += [
    (0,),  # empty array
    (1,),  # singleton array
]
# test to run atleast_nd w/ a single array
@pytest.mark.parametrize("size", SIZE_CASES, ids=str)
def test_atleast_1d(size):
    # compare np.atleast_1d and num.atleast_1d on one array of shape `size`
    a = [np.arange(np.prod(size)).reshape(size)]
    _check(a, "atleast_1d", size)
@pytest.mark.parametrize("size", SIZE_CASES, ids=str)
def test_atleast_2d(size):
    # compare np.atleast_2d and num.atleast_2d on one array of shape `size`
    a = [np.arange(np.prod(size)).reshape(size)]
    _check(a, "atleast_2d", size)
@pytest.mark.parametrize("size", SIZE_CASES, ids=str)
def test_atleast_3d(size):
    # compare np.atleast_3d and num.atleast_3d on one array of shape `size`
    a = [np.arange(np.prod(size)).reshape(size)]
    _check(a, "atleast_3d", size)
# test to run atleast_nd w/ list of arrays
@pytest.mark.parametrize("dim", range(1, 4))
def test_atleast_nd(dim):
    # pass one array per SIZE_CASES shape in a single call
    a = list(np.arange(np.prod(size)).reshape(size) for size in SIZE_CASES)
    _check(a, f"atleast_{dim}d", SIZE_CASES)
if __name__ == "__main__":
    import sys
    # forward CLI args to pytest so flags like -k / -v work when run directly
    sys.exit(pytest.main(sys.argv))
|
import numpy as np
from fractions import Fraction
# Repeatedly apply the matrix [[1, 2], [1, 1]] to the vector (1, 1) and
# print each intermediate product (Python 2 print statements).
a = np.matrix([[1, 2], [1, 1]])
u = np.array([[1], [1]])
for i in range(10):
    u = a * u
    print u
def rec(n, i):
    """Recursive helper for the continued-fraction expansion of e.

    Term i of the expansion [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: position 0 is 2,
    every position with i % 3 == 2 is the next even number, and all other
    positions are 1. The recursion terminates once i reaches n, yielding the
    n-th convergent as an exact Fraction.
    """
    if i == 0:
        term = 2
    elif i % 3 == 2:
        term = 2 * (i // 3 + 1)
    else:
        term = 1
    return term if i == n else term + Fraction(1, rec(n, i + 1))
def frac_exp(n):
    """Return the n-term continued-fraction convergent of e as a Fraction."""
    return rec(n - 1, 0)
# print the first few convergents of e (Python 2 print statements)
for i in range(1, 10):
    print frac_exp(i)
# digit sum of the numerator of the 100th convergent
# (presumably Project Euler problem 65 — confirm)
f = frac_exp(100)
print f
print f.numerator
print sum((int(char) for char in str(f.numerator)))
|
import math
# global simulation state: pending events and the current simulated clock
events = []
time = 0
class Event:
    """A scheduled simulation event, registered in the global `events` queue.

    `when` is the absolute firing time: the given delay added to the global
    simulated clock `time`.
    """

    def __init__(self, delay, name, params):
        self.when = delay + time
        self.name = name
        self.params = params
        # register the new event in the global queue. The redundant duplicate
        # `global events` declarations were removed: the declaration is only
        # needed when rebinding the name, and append() does not rebind.
        events.append(self)

    @staticmethod
    def popEvent():
        """Remove and return the earliest pending event, advancing the global
        clock to its firing time.

        :return: the earliest event, or None when the queue is empty
        :rtype: Event
        """
        global time
        target = None
        when = math.inf
        if len(events) == 0:
            return None
        # find the earliest event (linear scan)
        for event in events:
            if event.when < when:
                target = event
                when = event.when
        events.remove(target)
        time = when
        return target
|
from pyspark.sql import SQLContext
from pyspark.sql import HiveContext
from pyspark.sql.types import *
import ml_processing  #import steel_thread
from pyspark import SparkContext
import forecast_data_v4  #import forecast_data_v3
import numpy as np
import pandas as pd
# Spark entry points shared by the whole script
sc = SparkContext()
hive_context = HiveContext(sc)
sqlContext = SQLContext(sc)
# raw outage history CSV, one record per outage event
outageData=sc.textFile("file:///home/w205/w205_final_project/final_thread/outage_history.csv")
# states to process
states = ["Rhode Island", "Massachusetts"]
# Get state outage and weather data
def get_state_data():
    """Join outage history with weather history per state, train a logistic
    regression on it and score the weather forecast.

    NOTE(review): the function returns inside the `for s in states:` loop, so
    only the first state is ever processed — confirm whether per-state results
    should instead be accumulated.

    Returns (state, outage_probabilities, prediction_results, dates).
    """
    for s in states:
        print(s)
        # Do we need to blow away the pointers/tables/other vars first?
        # prepare the outage data for the state
        outage = outageData.filter(lambda x: s in x)
        stateoutage = outage.map(lambda r: r.split(","))
        Outages = stateoutage.map(lambda p: (p[2], p[4], p[5], p[8], p[12]))  # I could not figure out how to properly parse this...
        outageSchemaString = 'DATETIME HR MIN AREA NUMCUSTOMERS'  # If the above gets updated, this would too (of course)
        outageFields = [StructField(field_name, StringType(), True) for field_name in outageSchemaString.split()]
        outageSchema = StructType(outageFields)
        schemaOutageData = sqlContext.createDataFrame(Outages, outageSchema)
        schemaOutageData.registerTempTable('State_Outages')
        show_outages = sqlContext.sql('SELECT * FROM State_Outages')
        #show_outages.show()
        # prepare weather history data
        if len(s) > 1:
            s_ = s.replace(" ", "_")
        else:
            continue
        # Assumes weather history files will be in directories by state
        filename = "file:///home/w205/w205_final_project/final_thread/weather_history_" + s_ + ".csv"
        print(filename)
        weatherData = sc.textFile(filename)
        weatherRecords = weatherData.map(lambda r: r.split(","))
        State_Weather = weatherRecords.map(lambda p: (p[5], p[6], p[26], p[27], p[28], p[30], p[37], p[38], p[39], p[40], p[41], p[42], p[43], p[44], p[46]))
        weatherSchemaString = 'DTS ReportType maxTemp minTemp aveTemp aveHumidity WeatherCodes Precip Snowfall SnowDepth aveStationPressure aveSeaLevelPressure aveWindSpeed maxWindSpeed SustainedWindSpeed'
        weatherFields = [StructField(field_name, StringType(), True) for field_name in weatherSchemaString.split()]
        weatherSchema = StructType(weatherFields)
        schemaWeatherData = sqlContext.createDataFrame(State_Weather, weatherSchema)
        schemaWeatherData.registerTempTable('State_Weather')
        show_weather = sqlContext.sql('SELECT * FROM State_Weather')
        show_weather.show()
        # combine outage and weather history datasets
        # NOTE(review): this SQL joins a table named `Outages`, but the temp
        # table above is registered as 'State_Outages' — the join likely fails
        # or silently uses a stale table; confirm the intended table name.
        result_weatherOutage = sqlContext.sql('SELECT to_date(w.DTS) as DT ,w.maxTemp ,w.minTemp ,w.aveTemp ,w.aveHumidity ,w.WeatherCodes ,w.Precip ,w.Snowfall ,w.SnowDepth ,w.aveStationPressure ,w.aveSeaLevelPressure ,w.aveWindSpeed ,w.maxWindSpeed, w.SustainedWindSpeed ,case when o.DATETIME is null then 0 else 1 end as OutageIND FROM State_Weather w left outer join Outages o on to_date(w.DTS) = to_date(concat(substr(DATETIME,7,4),"-",substr(DATETIME,1,2),"-",substr(DATETIME,4,2))) WHERE w.ReportType="SOD" and year(to_date(w.DTS))=2016 and month(to_date(w.DTS))=2 ORDER BY DT LIMIT 100')
        #result_weatherOutage.show()
        # process combined dataset, first training data then training labels. Maybe combine to one loop?
        feature_data = result_weatherOutage.select('aveWindSpeed', 'maxTemp', 'minTemp', 'aveHumidity').collect()
        train_data = []
        train_labels = []
        for a in feature_data:
            aveWindSpeed = float('nan')
            maxTemp = float('nan')
            minTemp = float('nan')
            #aveHumidity = float('nan')
            # NOTE(review): aveHumidity is only assigned when a.aveHumidity is
            # truthy (its NaN default above is commented out) and is never used
            # in train_data below — dead code or missing feature; confirm.
            if a.aveWindSpeed:
                aveWindSpeed = float(a.aveWindSpeed)
            if a.maxTemp:
                maxTemp = float(a.maxTemp)
            if a.minTemp:
                minTemp = float(a.minTemp)
            if a.aveHumidity:
                aveHumidity = float(a.aveHumidity)
            train_data.append([aveWindSpeed, maxTemp, minTemp])
        label_data = result_weatherOutage.select('OutageIND').collect()
        for a in label_data:
            if np.isnan(a.OutageIND):
                train_labels.append(float('nan'))
            else:
                train_labels.append(float(a.OutageIND))
        #train_data_temp = np.array(train_data).reshape(-1,1)
        train_data_temp = np.array(train_data)
        train_labels_temp = np.array(train_labels).reshape(-1,1)
        # Does this properly keep the same records?
        # drop rows whose feature vector contains any NaN, keeping labels aligned
        train_labels = train_labels_temp[~np.isnan(train_data_temp).any(1)]
        train_data = train_data_temp[~np.isnan(train_data_temp).any(1)]
        test_data = forecast_data_v4.get_forecast()
        result_probabilities, result_predictions = ml_processing.lr_prediction(train_data, train_labels, test_data)
        outage_probabilities = result_probabilities[:,1]
        prediction_results = result_predictions
        dates = forecast_data_v4.get_dates()
        return s, outage_probabilities, prediction_results, dates
# entry point: run the pipeline and print the returned tuple
print(get_state_data())
|
from django.urls import path
from django.urls.conf import re_path, path
from .apis import *
# Routes for the export API.
urlpatterns = [
    # create a new export record
    path('exports/add', AddExportApi.as_view(), name='export_add'),
    # list exports between a start and end month, both as YYYYMM
    # (years 19xx/20xx, months 01-12), e.g. exports/list/start=202101&end=202112
    re_path(r'^exports/list/(?:start=(?P<start>(?:19|20)\d{2}(0[1-9]|1[012])))&(?:end=(?P<end>(?:19|20)\d{2}(0[1-9]|1[012])))$', ExportListApi.as_view(), name='export_list'),
    # update / delete an existing export by its integer id
    path('exports/update/<int:export_id>', UpdateExportApi.as_view(), name='export_update'),
    path('exports/delete/<int:export_id>', DeleteExportApi.as_view(), name='export_delete'),
]
# public API of this module
__all__ = [
    'ResetRepositoryCounters',
]
from gim.core.tasks.repository import RepositoryJob
class ResetRepositoryCounters(RepositoryJob):
    """Background job that recomputes the cached counters of a repository."""
    # queue this job type is dispatched on
    queue_name = 'reset-repo-counters'

    def run(self, queue):
        """Run the base job, then refresh the repository's global and
        per-user counters."""
        super(ResetRepositoryCounters, self).run(queue)
        counters = self.object.counters
        counters.update_global()
        counters.update_users()
|
# Read the group size x and the upper bound y, then print 1..y with single
# spaces inside each group of x numbers and a newline after every x-th one.
x, y = map(int, input().split(' '))
for n in range(1, y + 1):
    if n % x == 0:
        print(n)            # end of a group: break the line
    else:
        print(n, end=' ')   # still inside the current group
# Repeatedly read two integers and print a ** b; a pair summing to zero
# (e.g. "0 0") terminates the loop.
while True:
    a, b = map(int, input().split())
    if a + b == 0:
        break
    print(a ** b)
import json
import os
import string
import subprocess
import random
from shutil import copy, rmtree
from web3 import Web3
from src.util.config import Config
from src.util.web3 import web3_provider
from brownie import accounts, project
from tests.utils.keys import get_viewing_key
# Ethereum addresses registered as signers on the multisig wallet
signer_accounts = ['0xA48e330838A6167a68d7991bf76F2E522566Da33', '0x55810874c137605b96e9d2B76C3089fcC325ed5d',
                   '0x984C31d834d1F13CCb3458f4623dB21975FE4892', '0x552B5078a9044688F6044B147Eb2C8DFb538737e']
def deploy_eth():
    """Deploy the MultiSigSwapWallet contract to the configured ETH network.

    Reads the compiled contract JSON, signs the constructor transaction with a
    local key and waits for the deployment receipt.
    """
    cfg = Config()
    with open('./src/contracts/ethereum/compiled/MultiSigSwapWallet.json', 'r') as f:
        contract_source_code = json.loads(f.read())
    w3 = web3_provider(cfg['eth_node_address'])
    # NOTE(review): hard-coded private key committed in source — acceptable only
    # for a throwaway test account; never reuse for real funds.
    account = w3.eth.account.from_key("0xb84db86a570359ca8a16ad840f7495d3d8a1b799b29ae60a2032451d918f3826")
    print(f"Deploying on {cfg['network']} from address {account.address}")
    balance = w3.eth.getBalance(account.address, "latest")
    if balance < 1000000000000:
        print("You gotta have some cash dawg")
        return
    # Instantiate and deploy contract
    contract = w3.eth.contract(abi=contract_source_code['abi'], bytecode=contract_source_code['data']['bytecode']['object'])
    # constructor args: the signer set and the signature threshold
    tx = contract.constructor(signer_accounts, cfg['signatures_threshold'],)
    nonce = w3.eth.getTransactionCount(account.address, "pending")
    raw_tx = tx.buildTransaction(transaction={'from': account.address, 'gas': 3000000, 'nonce': nonce})
    signed_tx = account.sign_transaction(raw_tx)
    tx_hash = w3.eth.sendRawTransaction(signed_tx.rawTransaction)
    # .transact()
    # Get transaction hash from deployed contract
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
    print(f"Deployed at: {tx_receipt.contractAddress}")
    multisig_wallet = w3.eth.contract(address=tx_receipt.contractAddress, abi=contract_source_code['abi'])
    print("All done")
def rand_str(n):
    """Return a random alphanumeric string of length n."""
    pool = string.ascii_letters + string.digits
    return ''.join(random.choice(pool) for _ in range(n))
def deploy_scrt():
    """Instantiate the secret token and swap contracts via secretcli and store
    their addresses (and the swap contract's code hash) in the config."""
    configuration = Config()
    multisig_account = configuration["multisig_acc_addr"]
    deployer = "secret1qcz0405jctqvar3e5wmlsj2q5vrehgudtv5nqd"
    # instantiate code id 1 (the SNIP-20 token) administered by the multisig
    tx_data = {"admin": multisig_account, "name": "Coin Name", "symbol": "ETHR", "decimals": 6,
               "initial_balances": [], "config": {}, "prng_seed": "aa"}
    cmd = f"secretcli tx compute instantiate 1 --label {rand_str(10)} '{json.dumps(tx_data)}'" \
          f" --from t1 -b block -y"
    # run for its side effect; the result is immediately overwritten by the
    # follow-up address query below
    res = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    res = subprocess.run("secretcli query compute list-contract-by-code 1 | jq '.[-1].address'",
                         shell=True, stdout=subprocess.PIPE)
    # strip the surrounding quotes from jq's output
    configuration['secret_token_address'] = res.stdout.decode().strip()[1:-1]
    # instantiate code id 2 (the swap contract) owned by the multisig
    tx_data = { "owner": multisig_account }
    cmd = f"secretcli tx compute instantiate 2 --label {rand_str(10)} '{json.dumps(tx_data)}'" \
          f" --from t1 -b block -y"
    res = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    res = subprocess.run("secretcli query compute list-contract-by-code 2 | jq '.[-1].address'",
                         shell=True, stdout=subprocess.PIPE)
    configuration['secret_swap_contract_address'] = res.stdout.decode().strip()[1:-1]
    # record the swap contract's code hash (drop the leading "0x")
    res = subprocess.run(f"secretcli q compute contract-hash {configuration['secret_swap_contract_address']}",
                         shell=True, stdout=subprocess.PIPE).stdout.decode().strip()[2:]
    configuration['code_hash'] = res
if __name__ == '__main__':
    # only the Ethereum side is deployed by default
    deploy_eth()
    # deploy_scrt()
|
from classes.ListNode import ListNode
class Solution:
    def removeElements(self, head: ListNode, val: int) -> ListNode:
        """
        https://leetcode.com/problems/remove-linked-list-elements/
        Remove every node whose value equals `val`; leading nodes need
        separate handling because they move the head itself.
        """
        # advance the head past any leading nodes that must be dropped
        while head is not None and head.val == val:
            head = head.next
        if head is None:
            return None
        # unlink matching nodes by looking one step ahead of the cursor
        node = head
        while node.next is not None:
            if node.next.val == val:
                node.next = node.next.next
            else:
                node = node.next
        return head
|
#!/usr/bin/env python
from elasticsearch_dsl import Search, A
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import NotFoundError
import pytz
import datetime
from tzlocal import get_localzone
# Elasticsearch endpoint(s), comma-separated "host:port" entries
ES_SERVER = '172.31.23.21:9200'
class ELK(object):
    """Thin wrapper around an Elasticsearch client for the sql_ash* indices."""

    def __init__(self):
        self.elk_host_array = ES_SERVER.split(",")
        self.client = Elasticsearch(hosts=self.elk_host_array, timeout=60)
        self.index = 'sql_ash*'
        self.search_client = Search(using=self.client, index=self.index)

    def last_query(self):
        """Return up to 1000 raw hits within a hard-coded time window, or
        None when the index does not exist."""
        # NOTE(review): `hostname` is only used by the commented-out filters
        hostname = 'elkserver'
        starttime = local_string_to_utc('2019-11-26 12:00:00')
        endtime = local_string_to_utc('2019-11-26 13:00:00')
        # NOTE(review): this first assignment is immediately overwritten by the
        # range-filtered search on the next line
        query_search = self.search_client
        query_search = self.search_client.filter('range', **{'@timestamp': {'gte': starttime, 'lte': endtime}})
        #query_search = query_search.query({"term": {"machine": "hytest"}})
        #query_search = query_search.filter("term", name=hostname.lower())
        #query_search = query_search.query({"match": {"metricset.name": "diskio"}})
        #query_search = query_search.query({"terms": {"metricset.name": ["filesystem",]}})
        #query_search = query_search.query({"term": {"host.name": "hytest"}})
        #query_search = query_search.query({"term": {"system.network.name": "lo"}})
        #query_search = query_search.sort({"@timestamp": {"order": "desc"}})
        # cap the result window at 1000 documents
        query_search = query_search[0:1000]
        try:
            response = query_search.execute()
        except NotFoundError:
            return None
        else:
            return response.to_dict()['hits']['hits']
def local_string_to_utc(time_string, time_format='%Y-%m-%d %H:%M:%S'):
    """Parse a local-time string and return it as a timezone-aware UTC datetime.

    Raises ValueError when `time_string` does not match `time_format`.
    (The former `except ValueError: raise ValueError` wrapper was removed: it
    replaced strptime's informative parse error with a bare, message-less one.)
    """
    local_dt = datetime.datetime.strptime(time_string, time_format)
    local_tz = get_localzone()
    localize_dt = local_tz.localize(local_dt)
    return localize_dt.astimezone(pytz.UTC)
# run the query and dump every raw hit (Python 2 print statements)
a = ELK()
res = a.last_query()
for i in res:
    #print i.get('_source').get("system").get("network").get("out").get("bytes")
    print i
#for i in a.last_query():
#    res = i['_source']
#    print res['@timestamp'], res['event']
    #if res['system']:
    #    if res['system'].get('process'):
    #        print res['@timestamp'], res['system'].get('process').get('memory')
|
from collections import *
#User function Template for python3
class Solution:
    #Function to return list of integers visited in snake pattern in matrix.
    def snakePattern(self, matrix):
        """Traverse `matrix` row by row, reversing direction on every other
        row, and return the visited values as a flat list.

        Generalized to rectangular matrices: each row is scanned over its own
        length (the original inner loop used len(matrix), which only works for
        square input).
        """
        left_to_right = True
        output = []
        for row in matrix:
            level = deque()
            for value in row:  # iterate the row's own length, not len(matrix)
                if left_to_right:
                    level.append(value)
                else:
                    level.appendleft(value)  # reversed row via O(1) prepends
            left_to_right = not left_to_right
            output.extend(level)
        return output
|
import itertools
import logging
import pandas as pd
from bitarray import bitarray
from aq.aq_description import Fact
class FactBase:
    """Stores positive/negative training examples for one target property.

    Each example is a bitarray over the non-target properties: bit j is set
    when the row's value for property j falls in that property's value set.
    """

    def __init__(self, target_prop):
        self.target_prop = target_prop
        # row index -> feature bitarray, split by target-class membership
        self.positives = {}
        self.negatives = {}
        self.properties = []
        self.num_events = 0
        self.num_props = 0

    def __str__(self):
        return 'FactBase for property {0} ({1} props, {2} events: p={3}, n={4}):\n\t'.format(self.target_prop,
            len(self.properties),
            self.num_events,
            len(self.positives),
            len(
                self.negatives)) + '\n\t'.join(
            [b.to01() for b in itertools.chain(self.positives.values(), self.negatives.values())])

    def __repr__(self):
        return 'FactBase for property {0} (p={1}, n={2}):\n\t'.format(self.target_prop,
            len(self.positives), len(self.negatives))

    def build(self, data, class_description):
        """Populate positives/negatives from the DataFrame `data`, skipping
        rows with a missing target value and de-duplicating feature vectors."""
        target_index = data.columns.get_loc(self.target_prop.canon_attr_name)
        # feature properties = everything except the target
        self.properties = [prop for prop in class_description.properties if not prop == self.target_prop]
        self.num_props = len(self.properties)
        dup_counter = 0
        miss_counter = 0
        for i, row in data.iterrows():
            data_value = row[target_index]
            if not pd.isnull(data_value):
                b = bitarray(self.num_props)
                for j, prop in enumerate(self.properties):
                    value = row[prop.attr_id]
                    # missing feature values count as "not in the value set"
                    b[j] = False if pd.isnull(value) else value in prop.values
                if data_value in self.target_prop.values and b not in self.positives.values():
                    self.positives[i] = b
                elif data_value not in self.target_prop.values and b not in self.negatives.values():
                    self.negatives[i] = b
                else:
                    dup_counter += 1
            else:
                miss_counter += 1
        self.num_events = len(self.positives) + len(self.negatives)
        logging.debug('\tDelete {0} duplicated events'.format(dup_counter))
        logging.debug('\tMiss {0} missing target column events'.format(miss_counter))

    def clear(self):
        """Drop negative examples whose feature vector also appears among the
        positives (conflicting evidence)."""
        counter = 0
        for key in list(self.negatives.keys()):
            if self.negatives[key] in self.positives.values():
                del self.negatives[key]
                counter += 1
        self.num_events -= counter
        logging.debug('\tDelete {0} conflicted events'.format(counter))
class JSMHypothesis:
    """A JSM hypothesis: a feature bitmask `value` plus the bitmask of the
    examples that generated it."""

    def __init__(self, value, generator):
        self.value = value
        self.generator = generator

    def __str__(self):
        positions = [i for i, x in enumerate(self.generator) if x]
        return 'Hypothesis {0} by {1}'.format(self.value.to01(), positions)

    def __repr__(self):
        return self.value.to01()

    def __eq__(self, other):
        if self.value != other.value:
            return False
        return self.generator == other.generator

    def __ge__(self, other):
        # bitwise superset test on both the value and the generator masks
        covers_value = (self.value | other.value) == self.value
        covers_generator = (self.generator | other.generator) == self.generator
        return covers_value and covers_generator

    def __hash__(self):
        return 3 * hash(self.value.to01()) + 5 * hash(str(self.generator))
def search_norris(fb):
    """Run Norris-style JSM induction over a FactBase.

    Returns the positive hypotheses (maximal intersections of positive
    examples) surviving two filters: removal of hypotheses comparable with
    some negative intersection (or with a degenerate generator test), then
    removal of hypotheses subsumed by another remaining one.
    """
    if fb.positives and fb.negatives:
        pos_inters = _search_norris(fb.positives)
        neg_inters = _search_norris(fb.negatives)
        logging.debug('\tIt was found {0} pos and {1} neg hypothesis'.format(len(pos_inters), len(neg_inters)))
        conf_counter, dup_counter = 0, 0
        # Filter 1: drop positive intersections ordered w.r.t. a negative one.
        # NOTE(review): len(p_inter.generator) is the bit-length of the
        # generator vector, not the number of set bits — generator.count()
        # may have been intended; confirm.
        for p_inter in pos_inters[:]:
            for n_inter in neg_inters:
                unit = p_inter.value | n_inter.value
                if len(p_inter.generator) < 2 or unit == p_inter.value or unit == n_inter.value:
                    pos_inters.remove(p_inter)
                    conf_counter += 1
                    break
        # Filter 2: drop hypotheses whose value is a superset of another
        # surviving hypothesis' value (iterate a snapshot while mutating).
        l = pos_inters[:]
        for i, tmp1 in enumerate(l):
            for j, tmp2 in enumerate(l):
                if not i == j and (tmp1.value | tmp2.value) == tmp1.value:
                    pos_inters.remove(tmp1)
                    dup_counter += 1
                    break
        logging.debug('\tIt were deleted {0} conflicted and {1} surplus hypothesis'.format(conf_counter, dup_counter))
        return pos_inters
    else:
        logging.debug('\tThere is no positives or negatives examples in FB')
        return []
def _search_norris(positives):
    """Norris' incremental algorithm for all maximal intersections.

    positives: dict mapping integer example keys to feature bit-vectors.
    Returns a list of JSMHypothesis, one per maximal rectangle of the
    object/feature relation, built by folding examples in one at a time.
    """
    # Relation R=AxB, A - objects, B - features, Mk - maximal rectangles (maximal intersections)
    hypotheses = []
    # print(positives.keys())
    # template generator vector sized to cover the largest example key
    b = bitarray(max(positives.keys()) + 1)
    # print(b)
    b.setall(0)
    for key, value in positives.items(): # find object xkR
        # compute collection Tk={Ax(B intersect xkR): AxB in Mk-1}
        tmp_gen = [JSMHypothesis(value & h.value, h.generator.copy()) for h in hypotheses]
        # eliminate the members of Tk which are proper subsets of other members of Tk;
        # remaining sets are the members of T'k
        # (tmp_hyps is index-aligned with hypotheses; eliminated entries are None)
        tmp_hyps = []
        for i, tmp1 in enumerate(tmp_gen):
            if tmp1.value.any():
                for j, tmp2 in enumerate(tmp_gen):
                    if not i == j and tmp2 >= tmp1:
                        tmp_hyps.append(None)
                        break
                else:
                    tmp_hyps.append(tmp1)
            else:
                tmp_hyps.append(None)
        # for each CxD in Mk-1
        new_hyps = []
        add_example = True
        for i, hyp in enumerate(hypotheses):
            # if D subsetoreq xkR then (C unite xk)xD in Mk
            if (hyp.value | value) == value:
                hyp.generator[key] = 1
            else:
                # if D not susetoreq xkR then CxD in Mk, and (C unite xk)x(D intersect xkR) in Mk
                # if and only if emptyset noteq Cx(D intersect xkR) in T'k
                new_hyp = tmp_hyps[i]
                if new_hyp:
                    new_hyp.generator[key] = 1
                    new_hyps.append(new_hyp)
            if not value.any() or (hyp.value | value) == hyp.value:
                add_example = False
        hypotheses.extend(new_hyps)
        # xk x xkR in Mk if and only if emptyset noteq xkR notsubsetoreq D for all XxD in Mk - 1
        if add_example:
            c = b.copy()
            c[key] = 1
            hypotheses.append(JSMHypothesis(value, c))
    # print(hypotheses)
    return hypotheses
def test1():
    """Small hand-built fact base (5 features); returns the positive hypotheses."""
    fb = FactBase(Fact(0, {'1'}))
    fb.positives = {1: bitarray('11000'), 2: bitarray('11010'), 3: bitarray('11100')}
    fb.negatives = {4: bitarray('00101'), 5: bitarray('00110'), 6: bitarray('00011')}
    return search_norris(fb)
def test2():
    """"Square" example (9 features); returns the positive hypotheses."""
    fb = FactBase(Fact(0, {'1'}))
    fb.positives = {1: bitarray('111100100'), 2: bitarray('010101010'), 3: bitarray('011001101')}
    fb.negatives = {4: bitarray('111010011'), 5: bitarray('100011011'), 6: bitarray('011001011')}
    return search_norris(fb)
def test3():
    """Author's own example (5 features); returns the positive hypotheses."""
    fb = FactBase(Fact(0, {'1'}))
    fb.positives = {1: bitarray('11100'), 2: bitarray('10011'), 3: bitarray('11011')}
    fb.negatives = {4: bitarray('10001'), 5: bitarray('01110'), 6: bitarray('01010')}
    return search_norris(fb)
if __name__ == '__main__':
    # Ad-hoc driver: only test2 is currently enabled; test1/test3 kept for reference.
    # print('\nStart test 1 :')
    # t1 = test1()
    print('\nStart test 2 :')
    t2 = test2()
    print(t2)
    # print('\nStart test 3 :')
    # t3 = test3()
|
'''
Created on 4 Jul 2011
@author: Will Rogers
Tests for the RecordFactory.
'''
import unittest
from apel.db.loader.record_factory import RecordFactory, RecordFactoryException
from apel.db.records import JobRecord
from apel.db.records import SummaryRecord
class Test(unittest.TestCase):
    """Tests for the RecordFactory message parser.

    Fix: the two ``except Exception, e`` clauses used Python 2-only syntax,
    which is a SyntaxError on Python 3; ``except Exception as e`` is valid
    on Python 2.6+ and Python 3.
    """
    def setUp(self):
        # Prepare the hard-coded message fixtures and a fresh factory.
        self._get_msg_text()
        self._rf = RecordFactory()
    def tearDown(self):
        pass
    def test_create_records(self):
        """Nonsense input must raise RecordFactoryException."""
        try:
            self._rf.create_records(self._rubbish_text)
            self.fail('No exception thrown by nonsense message.')
        except RecordFactoryException:
            # We expect the nonsense message to fail.
            pass
    def test_create_jrs(self):
        """A two-record job message parses into two JobRecord objects."""
        try:
            records = self._rf.create_records(self._jr_text)
            if (len(records) != 2):
                self.fail('Expected two records from record text.')
            for record in records:
                if not isinstance(record, JobRecord):
                    self.fail('Expected JobRecord object.')
        except Exception as e:
            self.fail('Exception thrown when creating records from object: ' + str(e))
    def test_create_srs(self):
        """A two-record summary message parses into two SummaryRecord objects."""
        try:
            records = self._rf.create_records(self._sr_text)
            if (len(records) != 2):
                self.fail('Expected two records from record text.')
            for record in records:
                if not isinstance(record, SummaryRecord):
                    self.fail('Expected SummaryRecord object.')
        except Exception as e:
            self.fail('Exception thrown when creating records from object: ' + str(e))
    def _get_msg_text(self):
        # Below, I've just got some test data hard-coded.
        self._rubbish_text = '''In 1869, the stock ticker was invented.
It was an electro-mechanical machine consisting of a typewriter,
a long pair of wires and a ticker tape printer,
and its purpose was to distribute stock prices over long
distances in realtime. This concept gradually evolved into
the faster, ASCII-based teletype.'''
        self._jr_text = '''\
APEL-individual-job-message: v0.1
Site: RAL-LCG2
SubmitHost: ce01.ncg.ingrid.pt:2119/jobmanager-lcgsge-atlasgrid
LocalJobId: 31564872
LocalUserId: atlasprd019
GlobalUserName: /C=whatever/D=someDN
FQAN: /voname/Role=NULL/Capability=NULL
WallDuration: 234256
CpuDuration: 2345
Processors: 2
NodeCount: 2
StartTime: 1234567890
EndTime: 1234567899
MemoryReal: 1000
MemoryVirtual: 2000
ServiceLevelType: Si2k
ServiceLevel: 1000
%%
Site: RAL-LCG2
SubmitHost: ce01.ncg.ingrid.pt:2119/jobmanager-lcgsge-atlasgrid
LocalJobId: 31564873
LocalUserId: atlasprd019
GlobalUserName: /C=whatever/D=someDN
FQAN: /voname/Role=NULL/Capability=NULL
WallDuration: 234256
CpuDuration: 2345
Processors: 2
NodeCount: 2
StartTime: 1234567890
EndTime: 1234567899
MemoryReal: 1000
MemoryVirtual: 2000
ServiceLevelType: Si2k
ServiceLevel: 1000
%%'''
        self._sr_text = '''\
APEL-summary-job-message: v0.1
Site: RAL-LCG2
Month: 3
Year: 2010
GlobalUserName: /C=whatever/D=someDN
VO: atlas
VOGroup: /atlas
VORole: Role=production
WallDuration: 234256
CpuDuration: 2345
NumberOfJobs: 100
%%
Site: RAL-LCG2
Month: 4
Year: 2010
GlobalUserName: /C=whatever/D=someDN
VO: atlas
VOGroup: /atlas
VORole: Role=production
WallDuration: 234256
CpuDuration: 2345
NumberOfJobs: 100
%%
'''
import random

# ASCII art for the three hands (index 0 = rock, 1 = paper, 2 = scissors).
rock = '''
    _______
---'   ____)
      (_____)
      (_____)
      (____)
---.__(___)
'''
paper = '''
    _______
---'   ____)____
          ______)
          _______)
         _______)
---.__________)
'''
scissors = '''
    _______
---'   ____)____
          ______)
       __________)
      (____)
---.__(___)
'''
hands = [rock, paper, scissors]

npc_choice = random.randint(0, 2)
person_choice = int(input("What do you choose? Type 0 for Rock, 1 for Paper, or 2 for Scissors.\n"))

if not 0 <= person_choice <= 2:
    print("Sorry, I didn't understand your input. Please try again")
else:
    print(hands[person_choice])
    print(f"Computer chose:\n{hands[npc_choice]}")
    # In the 0/1/2 encoding, each hand beats the one "behind" it mod 3:
    # (player - computer) % 3 == 0 tie, == 1 player wins, == 2 player loses.
    outcome = (person_choice - npc_choice) % 3
    if outcome == 0:
        print("You tie")
    elif outcome == 1:
        print("You win!")
    else:
        print("You lose")
|
#!/usr/bin/env python3
from setuptools import setup, find_packages
# Package metadata and install requirements for the rosmap analysis tool.
setup(
    name="rosmap",
    version="0.2",
    packages=find_packages(),
    # command-line launcher installed alongside the package
    scripts=['rosmap-launcher'],
    install_requires=['GitPython>=2.1.8',
                      'pyyaml>=4.2b1',
                      'pyquery>=1.4.0',
                      'urllib3',
                      'certifi',
                      'python-hglib>=2.6.1',
                      'svn>=0.3.46',
                      'python-dateutil>=2.7.5',
                      'cpplint'],
    include_package_data=True,
    author="Marc Pichler",
    author_email="marc.pichler@joanneum.at",
    license="MIT",
    description="Clones and analyzes ROS-Packages.",
    url="https://github.com/jr-robotics/rosmap",
    project_urls={
        "Source Code": "https://github.com/jr-robotics/rosmap"
    },
    # requires Python 3.5.x or newer 3.x
    python_requires='~=3.5',
)
|
import numpy as np
import matplotlib.pyplot as plt
# Parameters shared by the density functions and sample generators below.
POISSON_PARAM = 3  # Poisson parameter k / lambda
UNIFORM_FRONT = np.sqrt(3)  # half-width of the zero-mean uniform's support
def normalized_distribution(x):
    """Standard normal N(0, 1) density at x (scalar or ndarray)."""
    coefficient = 1 / np.sqrt(2 * np.pi)
    return coefficient * np.exp(-x * x / 2)
def laplace_distribution(x):
    """Laplace density with scale 1/sqrt(2) (unit variance) at x."""
    scale = np.sqrt(2)
    return (1 / scale) * np.exp(-scale * np.abs(x))
def uniform_distribution(x):
    """Density of Uniform(-UNIFORM_FRONT, UNIFORM_FRONT) at x.

    Bug fix: the original only tested ``x <= UNIFORM_FRONT``, so the density
    was wrongly nonzero for every x below -UNIFORM_FRONT (the matching
    generator, generate_uniform, samples from the symmetric interval).
    Both bounds are now checked via the absolute value.
    """
    inside = (np.abs(x) <= UNIFORM_FRONT)
    return 1 / (2 * UNIFORM_FRONT) * inside
def cauchy_distribution(x):
    """Standard Cauchy density at x."""
    denominator = np.pi * (1 + x * x)
    return 1 / denominator
def poisson_distribution(x):
    """Poisson-shaped curve x**k * exp(-x) / k! with k = POISSON_PARAM.

    Fix: ``np.math`` was an accidental alias of the stdlib ``math`` module,
    deprecated in NumPy 1.25 and removed in NumPy 2.0 — use math.factorial
    directly (imported locally to keep this snippet self-contained).
    """
    import math
    k = POISSON_PARAM
    return (np.power(x, k) / math.factorial(k)) * np.exp(-x)
# Maps a distribution name to the density/pmf overlaid on its histogram.
func_dict = {
    'normal': normalized_distribution,
    'laplace': laplace_distribution,
    'uniform': uniform_distribution,
    'cauchy': cauchy_distribution,
    'poisson': poisson_distribution,
}
def generate_laplace(x):
    """Draw x samples from Laplace(loc=0, scale=1/sqrt(3))."""
    return np.random.laplace(loc=0, scale=1 / np.sqrt(3), size=x)
def generate_uniform(x):
    """Draw x samples from Uniform(-UNIFORM_FRONT, UNIFORM_FRONT)."""
    return np.random.uniform(low=-UNIFORM_FRONT, high=UNIFORM_FRONT, size=x)
def generate_poisson(x):
    """Draw x samples from Poisson(POISSON_PARAM)."""
    return np.random.poisson(lam=POISSON_PARAM, size=x)
# Maps a distribution name to its sample generator (each takes a count).
generate_dict = {
    'normal': np.random.standard_normal,
    'laplace': generate_laplace,
    'uniform': generate_uniform,
    'cauchy': np.random.standard_cauchy,
    'poisson': generate_poisson,
}
def draw(array, func, sector):
    """Histogram `array` in 1-based subplot column `sector` with its density.

    array: sample values; func: key into func_dict; sector: 1..3 column.
    """
    if sector == 3:
        # NOTE(review): called before plt.subplot below, so this titles the
        # axes that is current at that moment (the 2nd subplot), not the
        # figure — plt.suptitle may have been intended; confirm.
        plt.title('Histograms for 20, 100, 500 elements. Distribution: ' + func)
    plt.subplot(130+sector)
    plt.hist(array, 10, density=True)
    # overlay the theoretical curve across the observed sample range
    xx = np.linspace(np.min(array), np.max(array), 100)
    plt.plot(xx, func_dict[func](xx), 'r')
def research(distribution_type):
    """Show histograms of 20, 100 and 500 samples of the named distribution."""
    plt.figure("distribution " + distribution_type)
    # sample sizes grow by a factor of 5: 20, 100, 500
    for sector, num in enumerate((20, 100, 500), start=1):
        draw(generate_dict[distribution_type](num), distribution_type, sector)
    plt.show()
if __name__ == "__main__":
research('normal')
research('laplace')
research('cauchy')
research('poisson')
research('uniform')
|
"""MIPT Python Course Lection 25"""
visited = [False]
ans = []
def dfs(start, G, visited):
    """Post-order depth-first search from `start` over adjacency structure G.

    Marks reached vertices in `visited` and appends each vertex to the
    module-level `ans` list after all its unvisited neighbours (post-order).

    Bug fix: the recursive call was ``dfs()`` with no arguments, a TypeError
    on the first unvisited neighbour; the neighbour and traversal state are
    now passed through.
    """
    visited[start] = True
    for u in G[start]:
        if not visited[u]:
            dfs(u, G, visited)
    ans.append(start)
# NOTE(review): incomplete lecture scratch — `u` and `G` are undefined at
# module level, dfs is called here with 4 arguments but defined with 3, and
# `ans[i] = ans[::-1]` assigns a list into a single slot. Left as-is.
for i in range(1, u+1):
    if not visited[i]:
        dfs(i, G, visited, ans)
ans[i] = ans[::-1]
|
def addNumbers():
    """Interactively add two numbers, optionally combining a third with a
    user-chosen operator.

    Fixes: str.lower() returns a new string, but the original discarded the
    result (`changeOperator.lower()` on its own line), so any capitalised
    operator ("Add") fell into the error branch; it is now reassigned. The
    "divide" branch also printed "+" where the computation divides by the
    third number — the displayed operator now matches the maths.
    """
    welcomeAdd = "This is the Add Section"
    print(welcomeAdd)
    firstNumber = int(input("Input the first number to add"))
    secondNumber = int(input("Input the Second number to add"))
    moreNumberInputs = input("Would you like to add more numbers?")
    moreNumberInputs = moreNumberInputs.lower()
    if moreNumberInputs =="yes":
        thirdNumber = int(input("Input the Third number"))
        changeOperator =input("Would you like to times,add,subtract or divide this number?")
        changeOperator = changeOperator.lower()  # bug fix: keep the lowered string
        if changeOperator =="add":
            print(firstNumber, "+", secondNumber, "+", thirdNumber, "=", firstNumber + secondNumber + thirdNumber)
        elif changeOperator =="subtract":
            print(firstNumber, "+", secondNumber, "-", thirdNumber, "=", firstNumber + secondNumber - thirdNumber)
        elif changeOperator =="times":
            print(firstNumber, "+", secondNumber, "x", thirdNumber, "=", secondNumber * thirdNumber + firstNumber)
        elif changeOperator =="divide":
            # display fix: show "/" before the third number to match the maths
            print(firstNumber, "+", secondNumber, "/", thirdNumber, "=", secondNumber / thirdNumber + firstNumber)
        elif changeOperator:
            error = "Error 3: No operator deteccted, running sum with addition operator rerun the command to change the operator"
            print(error)
            print(firstNumber, "+", secondNumber, "+", thirdNumber, "=", firstNumber + secondNumber + thirdNumber)
    elif moreNumberInputs =="no":
        print(firstNumber, "+", secondNumber, "=", firstNumber+secondNumber)
    elif moreNumberInputs:
        error = "Error 2: No Recognized statement detected, running sum with previously stated numbers"
        print(error)
        print(firstNumber, "+", secondNumber, "=", firstNumber+secondNumber,)
def subtractNumbers():
    """Interactively subtract two numbers, optionally combining a third with
    a user-chosen operator.

    Fix: str.lower() returns a new string; the original discarded the result,
    so capitalised operators fell into the error branch — now reassigned.
    """
    welcomeSubtract = "This is the Subtract Section"
    print(welcomeSubtract)
    firstNumber = int(input("Input the First Number to Subtract"))
    secondNumber = int(input("Input the Second Number to Subtract"))
    moreNumberInputs = input("Would you like to add more numbers?")
    moreNumberInputs = moreNumberInputs.lower()
    if moreNumberInputs =="yes":
        thirdNumber = int(input("Input the Third Number"))
        changeOperator =input("Would you like to times,add,subtract or divide this number?")
        changeOperator = changeOperator.lower()  # bug fix: keep the lowered string
        if changeOperator =="add":
            print(firstNumber, "-", secondNumber, "+", thirdNumber, "=", firstNumber - secondNumber + thirdNumber)
        elif changeOperator =="subtract":
            print(firstNumber, "-", secondNumber, "-", thirdNumber, "=", firstNumber - secondNumber - thirdNumber)
        elif changeOperator =="times":
            # NOTE(review): computes second*third - first while the printed
            # expression reads first - second x third — confirm intent.
            print(firstNumber, "-", secondNumber, "x", thirdNumber, "=", secondNumber * thirdNumber - firstNumber)
        elif changeOperator =="divide":
            # NOTE(review): displays "+" yet computes second/third - first — confirm.
            print(firstNumber, "-", secondNumber, "+", thirdNumber, "=", secondNumber / thirdNumber - firstNumber)
        elif changeOperator:
            error = "Error 3: No operator deteccted, running sum with subtraction operator rerun the command to change the operator"
            print(error)
            print(firstNumber, "-", secondNumber, "-", thirdNumber, "=", firstNumber - secondNumber - thirdNumber)
    elif moreNumberInputs =="no":
        print(firstNumber, "-", secondNumber, "=", firstNumber-secondNumber)
    elif moreNumberInputs:
        error = "Error 2: No Recognized statement detected, running sum with previously stated numbers"
        print(error)
        print(firstNumber, "-", secondNumber, "=", firstNumber-secondNumber,)
def divideNumbers():
    """Interactively divide two numbers, optionally combining a third with
    a user-chosen operator.

    Fix: str.lower() returns a new string; the original discarded the result,
    so capitalised operators fell into the error branch — now reassigned.
    NOTE(review): no guard against a zero divisor anywhere in this section.
    """
    welcomeDivide = "This is the Divide Section"
    print(welcomeDivide)
    firstNumber = int(input("Input the First Number to Divide"))
    secondNumber = int(input("Input the Second Number to Divide"))
    moreNumberInputs = input("Would you like to add more numbers?")
    moreNumberInputs = moreNumberInputs.lower()
    if moreNumberInputs =="yes":
        thirdNumber = int(input("Input the Third Number"))
        changeOperator=input("Would you like to times,add,subtract or divide this number")
        changeOperator = changeOperator.lower()  # bug fix: keep the lowered string
        if changeOperator == "add":
            print(firstNumber, "/", secondNumber, "+", thirdNumber, "=", firstNumber / secondNumber + thirdNumber)
        elif changeOperator =="subtract":
            print(firstNumber, "/", secondNumber, "-", thirdNumber, "=", firstNumber / secondNumber - thirdNumber)
        elif changeOperator =="times":
            # NOTE(review): computes second*third / first while the printed
            # expression reads first / second x third — confirm intent.
            print(firstNumber, "/", secondNumber, "x", thirdNumber, "=", secondNumber * thirdNumber / firstNumber)
        elif changeOperator =="divide":
            # NOTE(review): computes second/third/first, not first/second/third — confirm.
            print(firstNumber, "/", secondNumber, "/", thirdNumber, "=", secondNumber / thirdNumber / firstNumber)
        elif changeOperator:
            error = "Error 3: No operator deteccted, running sum with division operator rerun the command to change the operator"
            print(error)
            # NOTE(review): copy-paste — this fallback prints/performs
            # subtraction despite claiming the division operator.
            print(firstNumber, "-", secondNumber, "-", thirdNumber, "=", firstNumber - secondNumber - thirdNumber)
    elif moreNumberInputs =="no":
        print(firstNumber, "/", secondNumber, "=", firstNumber/secondNumber)
    elif moreNumberInputs:
        error = "Error 2: No Recognized statement detected, running sum with previously stated numbers"
        print(error)
        print(firstNumber, "/", secondNumber, "=", firstNumber/secondNumber,)
def multiplyNumbers():
    """Interactively multiply two numbers, optionally combining a third with
    a user-chosen operator.

    Fix: str.lower() returns a new string; the original discarded the result,
    so capitalised operators fell into the error branch — now reassigned.
    """
    welcomeTimes = "This is the Multiply Section"
    print(welcomeTimes)
    firstNumber = int(input("Input the First Number to Multiply"))
    secondNumber = int(input("Input the Second Number to Multiply"))
    moreNumberInputs = input("Would you like to add more numbers?")
    moreNumberInputs = moreNumberInputs.lower()
    if moreNumberInputs =="yes":
        thirdNumber = int(input("Input the Third Number"))
        changeOperator=input("Would you like to times,add,subtract or divide this number")
        changeOperator = changeOperator.lower()  # bug fix: keep the lowered string
        if changeOperator == "add":
            print(firstNumber, "x", secondNumber, "+", thirdNumber, "=", firstNumber * secondNumber + thirdNumber)
        elif changeOperator =="subtract":
            # NOTE(review): displays "-" yet computes first/second * third
            # (divides instead of subtracting) — likely copy-paste; confirm.
            print(firstNumber, "x", secondNumber, "-", thirdNumber, "=", firstNumber / secondNumber * thirdNumber)
        elif changeOperator =="times":
            print(firstNumber, "x", secondNumber, "x", thirdNumber, "=", secondNumber * thirdNumber * firstNumber)
        elif changeOperator =="divide":
            print(firstNumber, "x", secondNumber, "/", thirdNumber, "=", secondNumber / thirdNumber * firstNumber)
        elif changeOperator:
            error = "Error 3: No operator deteccted, running sum with multiplication operator rerun the command to change the operator"
            print(error)
            # NOTE(review): copy-paste — this fallback prints/performs
            # subtraction despite claiming the multiplication operator.
            print(firstNumber, "-", secondNumber, "-", thirdNumber, "=", firstNumber - secondNumber - thirdNumber)
    elif moreNumberInputs =="no":
        print(firstNumber, "x", secondNumber, "=", firstNumber*secondNumber)
    elif moreNumberInputs:
        error = "Error 2: No Recognized statement detected, running sum with previously stated numbers"
        print(error)
        print(firstNumber, "x", secondNumber, "=", firstNumber*secondNumber,)
def welcome():
    """Print the calculator's greeting banner."""
    print("Welcome to my basic calculator program, you can use up to three numbers for each method")
def calculate():
    """Entry point: greet the user, then dispatch to the chosen operation."""
    welcome()
    operations = {
        "add": addNumbers,
        "subtract": subtractNumbers,
        "divide": divideNumbers,
        "multiply": multiplyNumbers,
    }
    choice = input("Would you like to add subtract, divide or multiply")
    handler = operations.get(choice.lower())
    if handler is not None:
        handler()
    else:
        print("Error 1:Method not stated, Enter the command again and try again")
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'login.ui'
#
# Created by: PyQt5 UI code generator 5.12
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
    """pyuic5-generated login form plus a hand-written submit handler."""
    def openwindow(self):
        # NOTE(review): Ui_Page2 is neither defined nor imported in this file,
        # and generated classes expose setupUi (capital U) rather than setupui;
        # calling this method would raise NameError — confirm the second page.
        self.window = QtWidgets.QMainWindow()
        self.ui = Ui_Page2()
        self.ui.setupui(self.window)
        self.window.show()
    def setupUi(self, Form):
        # Build the widget tree: two labels, two line edits, a submit button,
        # a title area and a "keep me logged in" radio button.
        Form.setObjectName("Form")
        Form.resize(734, 428)
        self.label_username = QtWidgets.QLabel(Form)
        self.label_username.setGeometry(QtCore.QRect(160, 130, 121, 31))
        self.label_username.setObjectName("label_username")
        self.label_password = QtWidgets.QLabel(Form)
        self.label_password.setGeometry(QtCore.QRect(160, 190, 111, 21))
        self.label_password.setObjectName("label_password")
        self.txt_input_username = QtWidgets.QLineEdit(Form)
        self.txt_input_username.setGeometry(QtCore.QRect(380, 140, 201, 21))
        self.txt_input_username.setObjectName("txt_input_username")
        self.txt_input_password = QtWidgets.QLineEdit(Form)
        self.txt_input_password.setGeometry(QtCore.QRect(380, 190, 201, 21))
        self.txt_input_password.setObjectName("txt_input_password")
        self.btn_submit = QtWidgets.QPushButton(Form)
        self.btn_submit.setGeometry(QtCore.QRect(380, 240, 211, 32))
        self.btn_submit.setObjectName("btn_submit")
        self.text_title = QtWidgets.QTextEdit(Form)
        self.text_title.setGeometry(QtCore.QRect(60, 10, 629, 51))
        self.text_title.setObjectName("text_title")
        self.radioButton = QtWidgets.QRadioButton(Form)
        self.radioButton.setGeometry(QtCore.QRect(390, 290, 191, 20))
        self.radioButton.setObjectName("radioButton")
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # Assign all user-visible strings.
        _translate = QtCore.QCoreApplication.translate
        Form.setWindowTitle(_translate("Form", "Login Page"))
        self.label_username.setText(_translate("Form", "Enter User Name"))
        self.label_password.setText(_translate("Form", "Enter Password"))
        self.btn_submit.setText(_translate("Form", "Submit"))
        # NOTE(review): connecting the clicked signal here means repeated
        # retranslate calls would attach the handler multiple times.
        self.btn_submit.clicked.connect(self.btn_submit_handler)
        self.text_title.setHtml(_translate("Form", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'.SF NS Text\'; font-size:13pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:24pt; color:#cf0f39;\">Welcome to University of Briodgeport Login Page </span></p></body></html>"))
        self.radioButton.setText(_translate("Form", "Keep me Logged In"))
    def btn_submit_handler(self):
        # NOTE(review): credentials are hard-coded ("admin"/"admin") — demo
        # only, not suitable for production use.
        val_pass = self.txt_input_password.text()
        val_username = self.txt_input_username.text()
        if val_pass == "admin" and val_username == "admin":
            print("welcome")
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
|
'''
Alessia Pizzoccheri - CS 5001 02
'''
import random
import turtle
import kmeans_viz
# 100 two-dimensional sample points to be clustered.
DATA = [
    [-32.97, -21.06], [9.01, -31.63], [-20.35, 28.73], [-0.18, 26.73],
    [-25.05, -9.56], [-0.13, 23.83], [19.88, -18.32], [17.49, -14.09],
    [17.85, 27.17], [-30.94, -8.85], [4.81, 42.22], [-4.59, 11.18],
    [9.96, -35.64], [24.72, -11.39], [14.44, -43.31], [-10.49, 33.55],
    [4.24, 31.54], [-27.12, -17.34], [25.24, -12.61], [20.26, -4.7],
    [-16.4, -19.22], [-15.31, -7.65], [-26.61, -20.31], [15.22, -30.33],
    [-29.3, -12.42], [-50.24, -21.18], [-32.67, -13.11], [-30.47, -17.6],
    [-23.25, -6.72], [23.08, -9.34], [-25.44, -6.09], [-37.91, -4.55],
    [0.14, 34.76], [7.93, 49.21], [-6.76, 12.14], [-19.13, -2.24],
    [12.65, -7.23], [11.25, 25.98], [-9.03, 22.77], [9.29, -26.2],
    [15.83, -1.45], [-22.98, -27.37], [-25.12, -23.35], [21.12, -26.68],
    [20.39, -24.66], [26.69, -28.45], [-45.42, -25.22], [-8.37, -21.09],
    [11.52, -16.15], [7.43, -32.89], [-31.94, -11.86], [14.48, -10.08],
    [0.63, -20.52], [9.86, 13.79], [-28.87, -17.15], [-29.67, -22.44],
    [-20.94, -22.59], [11.85, -9.23], [30.86, -21.06], [-3.8, 22.54],
    [-5.84, 21.71], [-7.01, 23.65], [22.5, -11.17], [-25.71, -14.13],
    [-32.62, -15.93], [-7.27, 12.77], [26.57, -13.77], [9.94, 26.95],
    [-22.45, -23.18], [-34.7, -5.62], [29.53, -22.88], [0.7, 31.02],
    [-22.52, -10.02], [-23.36, -14.54], [-19.44, -12.94], [-0.5, 23.36],
    [-45.27, -19.8], [8.95, 13.63], [47.16, -14.46], [5.57, 4.85],
    [-19.03, -25.41], [28.16, -13.86], [-15.42, -14.68], [10.19, -25.08],
    [0.44, 23.65], [-20.71, -20.94], [35.91, -20.07], [42.81, -21.88],
    [5.1, 9.33], [-15.8, -18.47], [5.39, -26.82], [-40.53, -17.16],
    [-29.54, 23.72], [7.8, 23.4], [-22.19, -27.76], [-23.48, -25.01],
    [-21.2, -21.74], [23.14, -24.14], [-28.13, -13.04], [-24.38, -6.79] ]
SQUARE_ROOT = 0.5  # exponent used to take a square root
POWER = 2  # exponent used to square coordinate differences
NUM_CENTROIDS = 4  # k in k-means
DISTANCE = 100000000  # sentinel "infinite" starting distance
COLORS = ['Purple','Red','Blue','Orange']  # one colour per centroid
def initialize_centroids():
    '''
    Name: initialize_centroids
    Parameters: None
    Return: list of NUM_CENTROIDS distinct points sampled from DATA
    '''
    chosen = []
    # keep drawing random points until we have NUM_CENTROIDS distinct ones
    while len(chosen) < NUM_CENTROIDS:
        candidate = random.choice(DATA)
        if candidate not in chosen:
            chosen.append(candidate)
    return chosen
def euclidean(a, b):
    '''
    Name: euclidean
    Parameters: list, list (two [x, y] points)
    Return: float - straight-line distance between a and b
    '''
    dx = (a[0] - b[0]) ** 2
    dy = (a[1] - b[1]) ** 2
    return (dx + dy) ** 0.5
def create_cluster(centroids):
    '''
    Name: create_cluster
    Parameter: nested list of centroid coordinates
    Return: nested list of [data_index, centroid_index] pairs
    '''
    assignment = []
    for data_index in range(len(DATA)):
        best_distance = DISTANCE
        best_centroid = None
        # pick the centroid closest to this data point
        for centroid_index in range(len(centroids)):
            current = euclidean(DATA[data_index], centroids[centroid_index])
            if current < best_distance:
                best_distance = current
                best_centroid = centroid_index
        assignment.append([data_index, best_centroid])
    return assignment
def optimize_centroids(centroids, assignment):
    '''
    Name: optimize_centroids
    Parameter: nested list (centroids), nested list (assignment pairs)
    Return: nested list - each centroid moved to the mean of its points

    Fix: if a centroid had no assigned points, len(x_coordinates) was 0 and
    the averaging raised ZeroDivisionError; such a centroid now keeps its
    previous position instead.
    '''
    new_centroids = []
    for i in range(len(centroids)):
        # collect the coordinates of every point assigned to centroid i
        x_coordinates = []
        y_coordinates = []
        for data_index, centroid_index in assignment:
            if centroid_index == i:
                x_coordinates.append(DATA[data_index][0])
                y_coordinates.append(DATA[data_index][1])
        if x_coordinates:
            # move the centroid to the mean of its assigned points
            x_average = sum(x_coordinates) / len(x_coordinates)
            y_average = sum(y_coordinates) / len(y_coordinates)
            new_centroids.append([x_average, y_average])
        else:
            # empty cluster: keep the old centroid rather than crash
            new_centroids.append(centroids[i])
    return new_centroids
def main():
    """Run one refinement step of k-means over DATA and draw the clusters."""
    # create centroids
    centroids = initialize_centroids()
    # create assignment list of centroids and DATA
    assignment = create_cluster(centroids)
    # uncomment line 141 and 142 to see initial centroids
    # kmeans_viz.draw_centroids(centroids,COLORS)
    # kmeans_viz.draw_assignment(centroids, DATA, assignment, COLORS)
    # improve centroids' coordinates
    centroids = optimize_centroids(centroids,assignment)
    # update assignment based on improved centroids
    assignment = create_cluster(centroids)
    # draw clusters
    kmeans_viz.draw_centroids(centroids,COLORS)
    kmeans_viz.draw_assignment(centroids, DATA, assignment, COLORS)
main()
from django.shortcuts import render
from common.models import Injection, CRI
from calc.forms import CalcInjForm, CRISimpleForm, CRIAdvancedForm, CRIInsulinForm, CRICPRForm, CRIMetoclopramideForm
from collections import OrderedDict
def calc_injection(request):
    """Calculates injection dosages based on weight.
    GET parameters:
    weight: weight in lbs
    Context:
    rx: calculated doses rounded to 3 decimal places
    """
    meds = Injection.objects.all()
    # default displayed dosage of 0.0 mLs for every injectable
    doses = {med: 0.0 for med in meds}
    rx_ordered = OrderedDict(sorted(doses.items(), key=lambda item: item[0].name))
    if request.method == 'GET' and request.is_ajax():
        form = CalcInjForm(data=request.GET)
        if form.is_valid():
            weight = float(request.GET['weight'])
            for med in meds:
                rx_ordered[med] = round(med.factor * weight, 3)
    return render(request, 'calc/injection.html', {'rx': rx_ordered,
                                                   'form': CalcInjForm(),
                                                   'navbar': 'calc'})
def calc_cri_simple(request):
    """Calculates simple CRI dosages based on weight.
    GET parameters:
    weight: weight in kgs
    Context:
    rx: calculated dosages rounded to 3 decimal places
    """
    meds = CRI.objects.filter(calc_type='ez')
    form = CRISimpleForm()
    bolus = dict()
    # default rows: (rate, 0.0) placeholder pairs for each medication
    rx = {med: [(rate, 0.0 * rate) for rate in med.rates] for med in meds}
    if request.method == 'GET' and request.is_ajax():
        form = CRISimpleForm(data=request.GET)
        if form.is_valid():
            weight = float(request.GET['weight'])
            for med in meds:
                rx[med] = [(rate, round(weight * med.factor * rate, 3))
                           for rate in med.rates]
                # bolus is calculated for diltiazem
                bolus = {'mg': round(weight * 0.25, 3), 'mL': round(weight * 0.05, 3)}
    return render(request, 'calc/cri_simple.html', {'navbar': 'calc',
                                                    'form': form,
                                                    'rx': rx,
                                                    'bolus': bolus})
def calc_cri_advanced(request):
    """Calculates complex CRI dosages based on multiple inputs.
    GET parameters:
    weight: weight in kgs
    rate: current cri rate
    volume: current iv volume in mL
    infusion: target infusion rate
    Context:
    rx: calculated dosages rounded to 3 decimal places
    """
    meds = CRI.objects.filter(calc_type='adv')
    form = CRIAdvancedForm()
    rx = {med: dict() for med in meds}
    if request.method == 'GET' and request.is_ajax():
        form = CRIAdvancedForm(data=request.GET)
        if form.is_valid():
            weight = float(request.GET['weight'])
            rate = float(request.GET['rate'])
            volume = float(request.GET['volume'])
            infusion = float(request.GET['infusion'])
            for med in meds:
                rx[med] = {
                    'maint': round((weight * 30 * 2.2) / 24, 3),
                    'maint_plus': round((weight * 30 + 70) / 24, 3),
                    'add': round(((weight * infusion * med.factor) / (rate / 60)) * volume, 3),
                }
    return render(request, 'calc/cri_advanced.html', {'navbar': 'calc',
                                                      'form': form,
                                                      'rx': rx})
def calc_cri_insulin(request):
    """Calculates CRI dosages for insulin
    GET parameters:
    weight: weight in kgs
    rate: current rate
    volume: current iv vol in mLs
    replacement: target replacement rate
    Context:
    rx: calculated dosages rounded to 3 decimal places
    """
    form = CRIInsulinForm()
    rx = dict()
    if request.method == 'GET' and request.is_ajax():
        form = CRIInsulinForm(data=request.GET)
        if form.is_valid():
            weight, rate, volume, replacement = (
                float(request.GET[key])
                for key in ('weight', 'rate', 'volume', 'replacement'))
            phosphorus = ((weight * replacement/3) * volume)/rate
            rx = {'maint': round((weight * 2.2 * 30)/24, 3),
                  'maint_plus': round((weight * 30 + 70)/24, 3),
                  'units_dog': round(((weight * 2.2) / (rate * 24)) * volume, 3),
                  'units_cat': round((weight * 1.1) / (rate * 24) * volume, 3),
                  'phosphorus': round(phosphorus, 3),
                  'phosphorus_excess': round(phosphorus * 4.4 * 1000 / volume, 3)}
    return render(request, 'calc/cri_insulin.html', {'navbar': 'calc',
                                                     'form': form,
                                                     'rx': rx})
def calc_cri_cpr(request):
    """Calculates CRI dosages for post CPR maintenance
    GET parameters:
    weight: weight in kg
    rate: current rate
    volume: current iv vol in mL
    dobutamine: target dobutamine rate
    dopamine: target dopamine rate
    lidocaine: target lidocaine rate
    Context:
    rx: calculated cri dosages rounded to 3 decimal places
    """
    form = CRICPRForm()
    rx = dict()
    if request.method == 'GET' and request.is_ajax():
        form = CRICPRForm(data=request.GET)
        if form.is_valid():
            weight, rate, volume = (
                float(request.GET[key]) for key in ('weight', 'rate', 'volume'))
            dobutamine, dopamine, lidocaine = (
                float(request.GET[key])
                for key in ('dobutamine', 'dopamine', 'lidocaine'))
            rx = {'maint': round((weight * 2.2 * 30)/24, 3),
                  'maint_plus': round((weight * 30 + 70)/24, 3),
                  'dose_dobutamine': round(((weight * dobutamine) / 12500)/(rate/60) * volume, 3),
                  'dose_dopamine': round((weight * dopamine / 40000)/(rate/60) * volume, 3),
                  'dose_lidocaine': round((weight * lidocaine / 20000)/(rate/60) * volume, 3),
                  'dose_epinephrine': round((weight/1000)/(rate/60) * volume, 3),
                  'dose_mannitol': round(weight * 4, 3),
                  'dose_solumedrol': round(weight * 30, 3)}
    return render(request, 'calc/cri_cpr.html', {'navbar': 'calc',
                                                 'form': form,
                                                 'rx': rx})
def calc_cri_metoclopramide(request):
    """Calculates CRI dosages for metoclopramide
    GET parameters:
    weight: weight in kg
    rate: current rate
    volume: current iv volume in mLs
    infusion: target infusion rate
    inc_infusion, inc_volume: optional infusion-increase inputs
    Context:
    rx: calculated cri dosages rounded to 3 decimal places

    Fix: the optional-increase branch indexed request.GET['inc_infusion']
    directly, which raises MultiValueDictKeyError (a 500) whenever the
    optional parameters are omitted; .get() treats a missing or empty value
    as "not provided" instead.
    """
    form = CRIMetoclopramideForm()
    rx = dict()
    if request.method == 'GET' and request.is_ajax():
        form = CRIMetoclopramideForm(data=request.GET)
        if form.is_valid():
            weight = float(request.GET['weight'])
            rate = float(request.GET['rate'])
            volume = float(request.GET['volume'])
            infusion = float(request.GET['infusion'])
            dose = (weight * infusion / 5)/(rate * 24) * volume
            rx = {'maint': round((weight * 2.2 * 30)/24, 3),
                  'maint_plus': round((weight * 30 + 70)/24, 3),
                  'dose': round(dose, 3),
                  'concentration': round(dose * 5 / volume, 3)}
            # optional parameters: only compute the increase when both present
            if request.GET.get('inc_infusion') and request.GET.get('inc_volume'):
                inc_volume = float(request.GET['inc_volume'])
                inc_infusion = float(request.GET['inc_infusion'])
                dose_inc_infusion = inc_infusion + infusion
                rx['inc_infusion'] = round(dose_inc_infusion, 3)
                rx['inc_dose'] = round(((dose_inc_infusion * weight / (rate * 24)) - (dose * 5 / volume)) * inc_volume / 5, 3)
                rx['inc_rate'] = round((dose_inc_infusion * weight)/((dose * 5)/volume)/24, 3)
    return render(request, 'calc/cri_metoclopramide.html', {'navbar': 'calc',
                                                            'form': form,
                                                            'rx': rx})
|
# Repeatedly read integers and print their squares; 0 exits.
while (n := int(input("Please enter a number (0 to exit) : "))) != 0:
    print("Square of", n, "is ", n * n)
#coding:utf-8
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
import matplotlib.pyplot as plt
import random
import tkinter
# Number of dice at the start of the simulation; kept in sync with the
# entry widget by collect_entry().
number = int()


def sim():
    """Simulate the dice-decay experiment and plot the result.

    Each round, every remaining die is rolled once; every 6 removes one
    die. The die count before each round is recorded and passed to graph().
    """
    nombre_de_départ = number
    liste = []
    while nombre_de_départ > 0:
        liste.append(nombre_de_départ)
        # Roll each remaining die once (range is fixed at loop entry, so
        # dice removed this round are still rolled this round).
        for i in range(nombre_de_départ):
            i = random.randint(1, 6)
            if i == 6:
                nombre_de_départ -= 1
    graph(len(liste), liste)
def graph(x, y):
    """Draw the die-count series into the tkinter window.

    :param x: number of rolls (length of y)
    :param y: die count recorded before each roll; y[0] is the start count
    """
    X = range(0, x)
    Y = y
    fig = plt.Figure(figsize=(5, 3))
    fig.suptitle('Représentation du nombre de dés présents à chaque lancer')
    # Create the Axes exactly once: on modern Matplotlib repeated
    # add_subplot(111) calls are not guaranteed to return the same Axes,
    # so the original's per-call pattern could scatter labels/plot/grid
    # over different axes. (Local renamed from `graph`, which shadowed
    # this function.)
    ax = fig.add_subplot(111)
    ax.set_xlabel('Nombre de lancers')
    ax.set_ylabel('Nombre de dés')
    ax.plot(X, Y, '.', color='grey')
    ax.axis([0, x, 0, Y[0] * 1.1])
    ax.grid()
    dis = FigureCanvasTkAgg(fig, win)
    dis.get_tk_widget().place(x=30, y=190)
def collect_entry(*args):
    """Trace callback: mirror the entry widget's value into the global `number`.

    tkinter passes trace arguments we do not need, hence *args.
    """
    global number
    number = var_entry.get()
# Main program: build the tkinter window, the labelled entry for the
# starting dice count, and the button that launches the simulation.
win = tkinter.Tk()
win.geometry('600x400')
win.title('Simulation de dés')
label = tkinter.Label(text='Entrer le nombre de dés présents au départ', font=('Ariel', 15), width=40, height=2)
label.place(x=80, y=10)
# IntVar traced on write so `number` is always current when sim() runs.
var_entry = tkinter.IntVar()
var_entry.trace("w", collect_entry)
entry = tkinter.Entry(win, textvariable=var_entry, width=50)
entry.place(x=150, y=60)
button = tkinter.Button(win, text='Effectuer la modélisation', width=30, height=5, command=sim)
button.place(x=190, y=90)
win.mainloop()
|
import math
from repartition_experiments.algorithms.utils import *
from repartition_experiments.algorithms.policy_remake import compute_zones_remake
def shape_to_end_coords(M, A, d=3):
    '''
    Compute, per dimension, the end coordinate of every block of shape M
    tiling an array of shape A.

    M: block shape, e.g. (500, 500, 500)
    A: array shape, e.g. (3500, 3500, 3500)
    d: number of dimensions to process
    Return: list of d lists of end coordinates, e.g.
        [[500, 1000, ..., 3500], [500, ...], [500, ...]]
    '''
    coords = []
    for dim in range(d):
        nb_blocks = int(A[dim] / M[dim])
        coords.append([M[dim] * (k + 1) for k in range(nb_blocks)])
    return coords
def seeks(A, M, D):
    '''
    Count the seeks needed to write memory blocks M into disk blocks D.
    The same count applies to reading D blocks into M blocks.

    A: shape of the large array, e.g. (3500, 3500, 3500)
    M: per-dimension end coordinates of the memory (read/write) blocks
    D: per-dimension end coordinates of the disk (input/output) blocks
    Return: the modelled number of seeks.
    '''
    ndim = len(A)
    cuts = [0] * ndim   # pieces each dimension is split into by misalignment
    matches = []        # aligned boundaries per dimension
    for d in range(ndim):
        for i, end in enumerate(D[d]):
            start = 0 if i == 0 else D[d][i - 1]
            # memory-block endings strictly inside this disk block
            inner = [e for e in M[d] if start < e < end]
            if inner:
                cuts[d] += len(inner) + 1
        matches.append(len(set(M[d]) | set(D[d])) - cuts[d])
    return A[0] * A[1] * cuts[2] + A[0] * cuts[1] * matches[2] + cuts[0] * matches[1] * matches[2]
def compute_baseline_seeks_model(A, I, O):
    """Model the seek/opening counts of the baseline repartition algorithm.

    :param A: full array shape
    :param I: input block shape
    :param O: output block shape
    :return: [outfile openings, outfile seeks, infile openings, infile seeks]
    """
    # An output file is re-opened at every distinct cut, so merge the read
    # cuts into the output-block cuts in each dimension.
    read_cuts = shape_to_end_coords(I, A, d=3)
    out_cuts = shape_to_end_coords(O, A, d=3)
    for dim in range(3):
        for cut in read_cuts[dim]:
            if cut not in out_cuts[dim]:
                out_cuts[dim].append(cut)
    nb_outfile_openings = len(out_cuts[0]) * len(out_cuts[1]) * len(out_cuts[2])
    nb_outfile_seeks = seeks(A, shape_to_end_coords(I, A), shape_to_end_coords(O, A))
    # Each input block is read once, sequentially: one opening, no seeks.
    nb_infile_openings = int(A[0] / I[0] * A[1] / I[1] * A[2] / I[2])
    nb_infile_seeks = 0
    return [nb_outfile_openings, nb_outfile_seeks, nb_infile_openings, nb_infile_seeks]
def compute_keep_seeks_model(A, R, I, O, W, nb_write_buffers):
    """Model the total seek count for the 'keep' strategy.

    :param A: full array shape
    :param R: read buffer shape
    :param I: input block shape
    :param O: output block shape
    :param W: per-dimension sorted end coordinates of the write buffers
    :param nb_write_buffers: number of write buffers (one outfile opening each)
    :return: modelled total seeks (read side + write side)
    """
    # Input blocks are opened at every distinct cut: merge read-buffer cuts
    # into the input-block cuts per dimension.
    read_cuts = shape_to_end_coords(R, A, d=3)
    in_cuts = shape_to_end_coords(I, A, d=3)
    for dim in range(3):
        for cut in read_cuts[dim]:
            if cut not in in_cuts[dim]:
                in_cuts[dim].append(cut)
    nb_infile_openings = len(in_cuts[0]) * len(in_cuts[1]) * len(in_cuts[2])
    s1 = seeks(A, shape_to_end_coords(R, A), shape_to_end_coords(I, A))
    s2 = seeks(A, W, shape_to_end_coords(O, A))
    print(f"[Model] total seeks inside inblocks: {s1}, nb_infile_openings: {nb_infile_openings}")
    print(f"[Model] total seeks due to read buffers: {s1 + nb_infile_openings}")
    s1 += nb_infile_openings
    print(f"[Model] total seeks inside outblocks: {s2}, nb outfile openings: {nb_write_buffers}")
    print(f"[Model] total seeks due to write buffers: {s2 + nb_write_buffers}")
    s2 += nb_write_buffers
    return s1 + s2
def get_volumes_to_keep(A, B, O):
    """Decide which buffer sub-volumes must be kept in memory.

    :param A: full array shape
    :param B: buffer shape
    :param O: output block shape
    :return: list of volume indices to keep
    """
    # Theta max: per-dimension maximum theta over all buffers.
    buffers_partition, buffers = get_volumes(A, B)
    T_max = [0, 0, 0]
    for buffer_index in buffers:
        _3d_index = numeric_to_3d_pos(buffer_index, buffers_partition, order='C')
        T, Cs = get_theta(buffers, buffer_index, _3d_index, O, B)
        T_max = [max(current, candidate) for current, candidate in zip(T_max, T)]
    print(f"Found theta max: {T_max}")

    # Keep more volumes the larger the buffer is relative to theta max.
    volumestokeep = [1]
    if B[1] > T_max[1]:
        print(f"{B[1]} > {T_max[1]}")
        volumestokeep.extend([2, 3])
    if B[0] > T_max[0]:
        print(f"{B[0]} > {T_max[0]}")
        volumestokeep.extend([4, 5, 6, 7])
    print(f"volumes to keep: {volumestokeep}")
    return volumestokeep
def keep_model_seeks(A, B, O, I):
    """Run the full 'keep' model: choose volumes to keep, compute write
    buffer boundaries via compute_zones_remake, then model the seeks.

    :param A: full array shape
    :param B: buffer shape
    :param O: output block shape
    :param I: input block shape
    :return: (modelled total seeks, volumes-to-keep list)
    """
    volumestokeep = get_volumes_to_keep(A, B, O)
    outfiles_partition = get_blocks_shape(A, O)
    outblocks = get_named_volumes(outfiles_partition, O)
    buffers = get_named_volumes(get_blocks_shape(A, B), B)
    arrays_dict, _, nb_file_openings, nb_inside_seeks = compute_zones_remake(B, O, A, volumestokeep, outfiles_partition, outblocks, buffers, False)

    # Collect the distinct end coordinates (upper corners) of every write
    # buffer, per dimension, sorted -- this is the W argument of the model.
    W = [list(), list(), list()]
    nb_write_buffers = 0
    for outblock_index, write_buffers in arrays_dict.items():
        for write_buff in write_buffers:
            nb_write_buffers += 1
            p1, p2 = write_buff.get_corners()
            for d in range(3):
                if not p2[d] in W[d]:
                    W[d].append(p2[d])
    for d in range(3):
        W[d].sort()

    model_total = compute_keep_seeks_model(A, B, I, O, W, nb_write_buffers)
    return model_total, volumestokeep
# Prompt for the user's name and report its length.
player = input("What is your Name?\n")
length = len(player)
print("Your Name is", player, " and It has", length, "Characters")
from django.contrib import admin
from .models import Field, Lecturer, Course, Lecture, LectureSession, LectureClassSession, Department
# Expose every scheduling model in the Django admin site.
for model in (
    Department,
    Field,
    Lecturer,
    Course,
    Lecture,
    LectureSession,
    LectureClassSession,
):
    admin.site.register(model)
|
import sys
import pygame
from pygame.sprite import Group
from settings import Settings
from game_stats import GameStats
from ship import Ship
from ufo import Ufo
import game_functions
from button import Button
from scoreboard import CurrentScore
from scoreboard import HiScore
from scoreboard import Level
from life import Life
import music
def run_game():
    """Initialise pygame and run the Alien Invasion main loop."""
    pygame.init()
    music.play_menu_music()
    game_settings = Settings()
    screen = pygame.display.set_mode(game_settings.get_screen_size())
    pygame.display.set_caption('Alien Invasion')
    # Game state, player ship, and sprite groups.
    stats = GameStats(game_settings)
    ship = Ship(game_settings, screen)
    ufos = Group()
    bullets = Group()
    stars = Group()
    explosions = Group()
    play_button = Button(game_settings, screen, 'Play')
    current_score = CurrentScore(game_settings, screen, stats)
    hi_score = HiScore(game_settings, screen, stats)
    level = Level(game_settings, screen, stats)
    lifes = Group()
    while True:
        # Input and star spawning run every frame, even on the menu screen.
        game_functions.check_events(game_settings, screen,
                                    stats, ship, ufos, bullets, play_button, lifes)
        game_functions.star_creation(game_settings, screen, stars)
        # NOTE(review): grouping below reconstructed from context -- ship and
        # enemy updates only while a game is active; confirm against original.
        if stats.game_active:
            ship.update()
            game_functions.update_bullets(bullets,ufos)
            game_functions.update_ufos(game_settings, screen, stats,
                                       ship, ufos, bullets, explosions, lifes)
        game_functions.update_screen(game_settings, screen, stats, ship, ufos, bullets, stars,
                                     explosions, play_button, current_score, hi_score, level, lifes)
        game_functions.update_collisions(game_settings, screen,
                                         stats, ship, ufos, bullets, explosions, current_score)
        game_functions.update_explosions(game_settings, explosions)
        game_functions.update_stars(stars, game_settings)


run_game()
|
#!/usr/bin/python3
# Author: Connor McLeod
# Contact: con.mcleod92@gmail.com
# Source code: https://github.com/con-mcleod/MonthlyPerf_Report
# Latest Update: 10 August 2018
import sys, csv, sqlite3, os, glob, re
from xlrd import open_workbook
##############################
# #
# SQL DATABASE CREATION #
# #
##############################
def create_tables(cxn):
    """
    Drop and recreate the ADJ_FORECAST and MONTH_GEN tables.
    :param cxn: the connection to the sqlite3 database
    :return:
    """
    ddl_statements = (
        "DROP TABLE IF EXISTS ADJ_FORECAST",
        "DROP TABLE IF EXISTS MONTH_GEN",
        """CREATE TABLE IF NOT EXISTS ADJ_FORECAST(
            SMI varchar(10),
            month int,
            year int,
            adj_val float
        )""",
        """CREATE TABLE IF NOT EXISTS MONTH_GEN(
            SMI varchar(10),
            month int,
            year int,
            val float,
            unique(SMI, month, year)
        )""",
    )
    cursor = cxn.cursor()
    for statement in ddl_statements:
        cursor.execute(statement)
    cursor.close()
##############################
# #
# Database handling #
# #
##############################
def dbselect(cxn, query, payload):
    """
    Run a SELECT against the sqlite3 database and return all rows.
    :param cxn: connection to the sqlite3 database
    :param query: the query to be run
    :param payload: parameter tuple for the query, or None/empty for none
    :return: list of result row tuples
    """
    cursor = cxn.cursor()
    rows = cursor.execute(query, payload) if payload else cursor.execute(query)
    results = list(rows)
    cursor.close()
    return results
def dbexecute(cxn, query, payload):
    """
    Execute a data-modifying statement on the sqlite3 database.

    The change is left uncommitted; callers commit once at the end of the run.
    :param cxn: connection to the sqlite3 database
    :param query: the query to be run
    :param payload: parameter tuple for the query, or None/empty for none
    :return:
    """
    cursor = cxn.cursor()
    if not payload:
        cursor.execute(query)
    else:
        cursor.execute(query, payload)
    # Close the cursor when done -- the original leaked it, and dbselect
    # (the companion helper) already closes its cursor.
    cursor.close()
##############################
# #
# Helper functions #
# #
##############################
def get_all_SMIs(cxn):
    """
    Fetch every distinct SMI present in the DAILY_GEN table.
    :param cxn: connection to sqlite3 database
    :return: list of single-element row tuples, one per SMI
    """
    return dbselect(cxn, "SELECT distinct(SMI) from DAILY_GEN", None)
def get_all_months(cxn):
    """
    Fetch every (month, year) pair covered by the DAILY_GEN table.
    :param cxn: connection to sqlite3 database
    :return: list of (month, year) row tuples in chronological order
    """
    query = ("SELECT obs_month, obs_year from DAILY_GEN "
             "group by obs_month, obs_year order by obs_year, obs_month")
    return dbselect(cxn, query, None)
def create_month_gen(cxn, SMI, date):
    """
    Sum the daily generation of an SMI over one whole month.
    :param cxn: connection to sqlite3 database
    :param SMI: row tuple whose first element is the SMI identifier
    :param date: (month, year) pair
    :return: [(total, datatype)] result row list
    """
    month, year = date[0], date[1]
    query = ("SELECT sum(value), datatype from DAILY_GEN where SMI=? and "
             "obs_month=? and obs_year=?")
    return dbselect(cxn, query, (SMI[0], month, year))
def get_adj_month_gen(cxn, SMI, date, supply_day):
    """
    Sum an SMI's monthly generation, counting only days on or after the
    supply date (used for the month in which supply started).
    :param cxn: connection to sqlite3 database
    :param SMI: row tuple whose first element is the SMI identifier
    :param date: (month, year) pair
    :param supply_day: first day of supply within that month
    :return: [(total, datatype)] result row list with pre-supply days removed
    """
    month, year = date[0], date[1]
    query = ("SELECT sum(value), datatype from DAILY_GEN where SMI=? and "
             "obs_month=? and obs_year=? and obs_day>=?")
    return dbselect(cxn, query, (SMI[0], month, year, supply_day))
def get_supply_date(cxn, SMI):
    """
    Function to get the supply date of an SMI from the Salesforce data
    :param cxn: connection to sqlite3 database
    :param SMI: the SMI of interest, passed as a row tuple like ('123...',)
    :return result: list of supply date rows, e.g. [('2017-05-01',)]
    """
    query = "SELECT supply_date from SMI_DETAILS where SMI=?"
    # NOTE(review): (SMI) is NOT a one-element tuple -- it is just SMI.
    # This only works because callers pass SMI as a 1-tuple row from
    # dbselect; confirm before passing a bare string here.
    payload = (SMI)
    result = dbselect(cxn, query, payload)
    return result
def get_forecast(cxn, SMI, month):
    """
    Fetch the forecast value for an SMI in a given month.
    :param cxn: connection to sqlite3 database
    :param SMI: the SMI identifier string
    :param month: the month of interest
    :return: list of forecast value rows, e.g. [(123.4,)]
    """
    return dbselect(cxn, "SELECT val from FORECAST where SMI=? and month=?", (SMI, month))
def get_days_in_month(month, year):
    """
    Return the number of days in a month for a two-digit year (20yy).
    :param month: the month of interest (1-12)
    :param year: two-digit year, i.e. 16 means 2016
    :return: number of days in the given month
    """
    if month in (1, 3, 5, 7, 8, 10, 12):
        return 31
    if month in (4, 6, 9, 11):
        return 30
    # February: 20yy is a leap year iff yy % 4 == 0, which is exact for the
    # whole 2000-2099 range (2000 itself is a leap year). The original
    # hard-coded [16, 20, 24, 28] and silently broke after 2028.
    return 29 if year % 4 == 0 else 28
def month_gen_insert(cxn, SMI, month, year, gen):
    """
    Insert one row into MONTH_GEN (ignored if the (SMI, month, year)
    unique constraint already holds).
    :param cxn: connection to sqlite3 database
    :param SMI: the SMI of interest
    :param month: month of the generation data
    :param year: year of the generation data
    :param gen: generation total after supply-date adjustment
    :return:
    """
    query = ("INSERT OR IGNORE INTO MONTH_GEN(SMI, month, year, val) "
             "VALUES (?,?,?,?)")
    dbexecute(cxn, query, (SMI, month, year, gen))
def adj_forecast_insert(cxn, SMI, month, year, adj_forecast):
    """
    Insert one adjusted-forecast row into ADJ_FORECAST.
    :param cxn: connection to sqlite3 database
    :param SMI: the SMI of interest
    :param month: month of the forecast
    :param year: year of the forecast
    :param adj_forecast: forecast value after supply-date adjustment
    :return:
    """
    query = ("INSERT OR IGNORE INTO adj_forecast(SMI, month, year, adj_val) "
             "VALUES (?,?,?,?)")
    dbexecute(cxn, query, (SMI, month, year, adj_forecast))
##############################
# #
# Translating monthly data #
# #
##############################
if __name__ == '__main__':

    # terminate program if not executed correctly (no arguments expected)
    if (len(sys.argv) != 1):
        print ("Usage: python3 adjuster.py")
        exit(1)

    # connect to the database and create the tables
    DATABASE = "dataset.db"
    cxn = sqlite3.connect(DATABASE)
    create_tables(cxn)

    # get all the sites of interest and dates of data
    SMIs = get_all_SMIs(cxn)
    dates = get_all_months(cxn)

    # for each site manipulate the given data for required adjustments
    for SMI in SMIs:
        print ("Collating monthly data and adjusting forecast for SMI: " + SMI[0])
        # Supply date rows look like [('yyyy-mm-dd',)]; '' means unknown.
        supply_date = get_supply_date(cxn, SMI)
        if bool(supply_date) and supply_date[0][0] != '':
            # Slice the ISO date string: two-digit year, month, day.
            supply_year = int(supply_date[0][0][2:4])
            supply_month = int(supply_date[0][0][5:7])
            supply_day = int(supply_date[0][0][8:10])
            for date in dates:
                month = date[0]
                year = date[1]
                adj_forecast = get_forecast(cxn, SMI[0], month)[0][0]
                # logic to adjust generation/forecast based on supply date:
                # months wholly before supply get zero, the supply month is
                # pro-rated, everything after is untouched.
                if (year < supply_year):
                    month_gen = [[0]]
                    adj_forecast = 0
                elif (supply_year == year):
                    if (month == supply_month):
                        # Pro-rate both generation and forecast from supply_day.
                        month_gen = get_adj_month_gen(cxn, SMI, date, supply_day)
                        days_in_month = get_days_in_month(month, year)
                        adj_forecast = adj_forecast * (1-(supply_day/days_in_month))
                    elif (month < supply_month):
                        month_gen = [[0]]
                        adj_forecast = 0
                    else:
                        month_gen = create_month_gen(cxn, SMI, date)
                        adj_forecast = get_forecast(cxn, SMI[0], month)[0][0]
                else:
                    month_gen = create_month_gen(cxn, SMI, date)
                    adj_forecast = get_forecast(cxn, SMI[0], month)[0][0]
                # hardcoded solution for the solar farm sites: apply yearly
                # degradation factors instead of the supply-date adjustment
                if (SMI[0]=="6203778594" or SMI[0]=="6203779394"):
                    adj_forecast = get_forecast(cxn, SMI[0], month)[0][0]
                    if year == 16:
                        adj_forecast = 0.933*adj_forecast
                    elif year == 17:
                        adj_forecast = 0.926*adj_forecast
                    elif year == 18:
                        adj_forecast = 0.919*adj_forecast
                month_gen_insert(cxn, SMI[0], month, year, month_gen[0][0])
                adj_forecast_insert(cxn, SMI[0], month, year, adj_forecast)
        else:
            # this handles sites that do not have a supply date populated in
            # Salesforce: store the unadjusted forecast for every month
            print (SMI[0], "does not have a supply date apparently so forecast remains the same")
            for date in dates:
                month = date[0]
                year = date[1]
                adj_forecast = get_forecast(cxn, SMI[0], month)[0][0]
                adj_forecast_insert(cxn, SMI[0], month, year, adj_forecast)

    # single commit at the end -- dbexecute intentionally does not commit
    cxn.commit()
    cxn.close()
    print ("Complete!")
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.backend.codegen.thrift.target_types import (
ThriftSourcesGeneratorTarget,
ThriftSourceTarget,
)
from pants.engine.target import BoolField
class ScroogeFinagleBoolField(BoolField):
    """Plugin field added to Thrift targets to toggle Finagle codegen."""

    alias = "finagle"
    default = False
    help = "If True, then also generate Finagle classes for services when using Scrooge as the Thrift generator."
def rules():
    """Register the `finagle` field on both Thrift target types."""
    return (
        ThriftSourceTarget.register_plugin_field(ScroogeFinagleBoolField),
        ThriftSourcesGeneratorTarget.register_plugin_field(ScroogeFinagleBoolField),
    )
|
from data import DataGeneratorNew
from core import ResNet
import tensorflow as tf
import math
# TF-Slim handle (TensorFlow 1.x API).
slim = tf.contrib.slim

# Output file for the per-area predictions.
result_txt_file = "result.txt"
# Test image directory and checkpoint location (absolute Windows paths).
DATASET_DIR = "D:\\competition\\data\\test_img\\test"
CHECKPOINT_DIR = 'D:\\pycharm_program\\UrbanFunctionClassification\\checkpoint'
NUM_CLASSES = 9
# Images are classified one at a time.
BATCHSIZE = 1
def tool(AreaID):
    """Left-pad an AreaID string with zeros to a fixed width of 6.

    Strings already 6 characters or longer are returned unchanged,
    matching the original manual padding loop.
    :param AreaID: area identifier as a string
    :return: zero-padded 6-character string
    """
    # rjust (not zfill) exactly reproduces the old loop, including for
    # non-digit input: it pads unconditionally on the left.
    return AreaID.rjust(6, "0")
##################### get the input pipline ############################
with tf.name_scope("input"):
    DataGenerator = DataGeneratorNew.DataGenerator()
    TestDataset = DataGenerator.get_batch(BATCHSIZE, tag="testing")
    # get the dataset statistics (test set size is fixed, not computed)
    test_set_length = 10000
    print("test_set_length:%d" % test_set_length)
    iterator = TestDataset.make_one_shot_iterator()
    next_batch = iterator.get_next()
##################### get the input pipline ############################


##################### setup the network ################################
# Placeholders: 100x100 RGB image batch and a training-mode flag.
x = tf.placeholder(tf.float32, shape=(None, 100, 100, 3))
is_training = tf.placeholder('bool', [])
with tf.name_scope("ResNet"):
    depth = 50  # can be 50, 101 or 152
    ResNetModel = ResNet.ResNetModel(is_training, depth, NUM_CLASSES)
    net_output = ResNetModel.inference(x)
# prediction op: index of the highest-scoring class
with tf.name_scope("test"):
    pre = tf.argmax(net_output, 1)
##################### setup the network ################################
with tf.Session() as sess:
    # initial variables
    sess.run(tf.local_variables_initializer())
    sess.run(tf.global_variables_initializer())

    # restore the latest checkpoint if one exists
    saver = tf.train.Saver()
    ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print("Model restored .....")

    # inference over the whole test set, one image at a time
    print("testing start")
    test_batches_of_epoch = int(math.ceil(test_set_length/BATCHSIZE))
    result = []
    for step in range(test_batches_of_epoch):
        img_batch, visit, AreaID_batch = sess.run(next_batch)
        classIDs = sess.run(pre, feed_dict={x: img_batch, is_training: False})
        # labels are 1-based while argmax is 0-based
        classID = (classIDs[0] + 1)
        print("AreaID %d" % AreaID_batch[0])
        print(classID)
        dict_result = {"AreaID": tool(str(AreaID_batch[0])), "classID": "00" + str(classID)}
        result.append(dict_result)

    # write one tab-separated "AreaID<TAB>classID" line per area
    with open(result_txt_file, "w") as f:
        for item in result:
            f.write(item["AreaID"] + "\t" + item["classID"] + "\n")
|
from datetime import datetime
import glob
import hashlib
import subprocess
#timeFilterTooOld = 3600 * 600
class Monitor :
    """Nginx health monitor (Python 2).

    Parses access and error logs, weights recent failures into a status
    level, and checks that the nginx process is running.
    """

    def __init__(self, conf) :
        """conf keys: filterOlderThan (hours), accessLogs / errorLogs
        (glob patterns), failedAccessWeight, errorWeight."""
        print "init nginx stuff"
        self.filterOlderThan = 3600 * conf["filterOlderThan"]  # seconds
        self.accessLogfiles = glob.glob(conf["accessLogs"])
        self.errorLogfiles = glob.glob(conf["errorLogs"])
        self.failedAccessWeight = conf["failedAccessWeight"]
        self.errorWeight = conf["errorWeight"]
        self.status = "ok"

    def update(self) :
        """Re-parse the logs and recompute the status level."""
        ## Check logs
        self.parseLogs()

        ## Compute status level: weighted count of recent failures.
        self.status = "ok"
        sumWeight = len(self.failedAccessLogs) * self.failedAccessWeight \
                    + len(self.errorLogs) * self.errorWeight
        if (sumWeight > 1) :
            self.status = "warning"
        if (sumWeight > 2) :
            self.status = "error"

        # Check service is running; a missing process overrides everything.
        output = subprocess.check_output(['ps', '-A'])
        if 'nginx' not in output:
            self.status = "alert"

    def getStatus(self) :
        """Return the current status and the relevant log lines."""
        status = { "status" : self.status,
                   "logs" : self.relevantLogs
                 }
        return status

    ######################
    ###   Parse logs   ###
    ######################

    def parseLogs(self) :
        """Collect recent failed accesses and errors from all log files."""
        self.relevantLogs = []
        self.failedAccessLogs = []
        self.errorLogs = []

        # Parse access logs
        for fileName in self.accessLogfiles :
            self.failedAccessLogs.extend(self.parseAccessLog(fileName))

        # Parse error logs
        for fileName in self.errorLogfiles :
            self.errorLogs.extend(self.parseErrorLog(fileName))

        self.relevantLogs.extend(self.failedAccessLogs)
        self.relevantLogs.extend(self.errorLogs)
        self.relevantLogs.sort()

    def parseAccessLog(self, fileName) :
        """Return recent requests with HTTP status >= 400, anonymised.

        Assumes the common/combined log format with double-quoted request
        and user-agent fields -- confirm against the nginx log_format.
        """
        relevantLogs = [ ]
        with open(fileName, 'r') as f:
            for line in f :
                userAndTimestamp, request, code, answer, _, userAgent, _ \
                        = line.split('"')
                code = code.split()[0]
                IP, _, user, timestamp, _ = userAndTimestamp.split()
                timestamp = datetime.strptime(timestamp[1:], '%d/%b/%Y:%X')

                # Filter results that are too old
                if (not self.isRecent(timestamp)) :
                    continue

                # Filter results that are not error (status < 400)
                if (int(code) < 400) :
                    continue

                # Hash IP/user/agent so logs do not expose identities.
                userHash = IP+"/"+user+"/"+userAgent
                userHash = int(hashlib.sha1(userHash).hexdigest(), 16) % (10 ** 8)
                log = "["+str(timestamp)+"] ["+str(userHash)+"] "+request+" ["+code+"]"
                relevantLogs.append(log)
        return relevantLogs

    def parseErrorLog(self, fileName) :
        """Return recent error-log messages, truncated to 200 characters."""
        relevantLogs = [ ]
        with open(fileName, 'r') as f :
            for line in f :
                # Skip PHP application messages.
                if (line.startswith("PHP message:")) :
                    continue
                lineSplit = line.split()
                timestamp = datetime.strptime(lineSplit[0]+" "+lineSplit[1],
                        '%Y/%m/%d %X')
                message = ' '.join(lineSplit[5:])

                # Filter results that are too old
                if (not self.isRecent(timestamp)) :
                    continue

                log = "["+str(timestamp)+"] [Error] "+message[:200]
                relevantLogs.append(log)
        return relevantLogs

    def isRecent(self,timestamp) :
        """True when timestamp is within the configured age window."""
        now = datetime.now()
        timeDiff = (now - timestamp).total_seconds()
        if (timeDiff > self.filterOlderThan) :
            return False
        else :
            return True
|
# Print "no" if any word repeats in the input line, otherwise "yes".
seen = set()
for word in input().split():
    if word in seen:
        print("no")
        break
    seen.add(word)
else:
    print("yes")
|
# Euler 46.Goldbach's other conjecture
import math
def primes_sieve2(limit):
    """Yield every prime below *limit* via a Sieve of Eratosthenes."""
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for candidate, flag in enumerate(is_prime):
        if flag:
            yield candidate
            for multiple in range(candidate * candidate, limit, candidate):
                is_prime[multiple] = False
def isComposite(x):
    """Return True when x (>= 0) has a non-trivial divisor.

    Trial division only needs to run up to sqrt(x): any composite has a
    factor in that range. The original scanned all of 2..x-1 (and its
    `i != x` check was always true inside range(2, x)).
    """
    if x < 2:
        return False
    return any(x % i == 0 for i in range(2, math.isqrt(x) + 1))
# Project Euler 46: find the smallest odd composite that cannot be written
# as a prime plus twice a square.
candidate = 9
while True:
    if isComposite(candidate) and not any(
        math.sqrt((candidate - p) / 2).is_integer()
        for p in primes_sieve2(candidate)
    ):
        break
    candidate += 2
print(candidate)
# Simulation configuration: all values are plain module-level constants.

# Filename for easy use
fileName="noErrors.py"
mapFile="testMap"

# Variance for sensor return values
carSpeedSensorError=30
carGyroSensorError=0

# Variance for calculation
carSteerError=0
carSpeedError=0

# Car settings: limits, start pose (x, y, heading in radians) and size
carMaxSpeed=100
carMaxSteer=1
carStartX=55
carStartY=80
carStartAngle=-1.57075
#carStartX=400
#carStartY=100
#carStartAngle=0
carLength=100

# First order Lag, T=0->no Lag
carSteerT=0
carSpeedT=0

# World settings: grid spacing and dimensions in pixels
worldSpacing=10
worldWidth=800
worldHeight=600

# Variance for GPS readings
gpsSensorNoise=30

# Probability of wrong Pixel given back by the camera
cameraImageNoise=0

# Update intervals: frequency = 50 / update (integers only)
cameraUpdate=10
gpsSensorUpdate=10
laserScannerUpdate=10
laserScannerRange=200

# every errorUpdateTime (in sec) new random values for calculation are generated
errorUpdateTime = 0
#!/usr/bin/env python
# -*- coding:utf-8 -*-
def jia(x, y):
    """Return the sum x + y."""
    return x + y


def jian(x, y):
    """Return the difference x - y."""
    return x - y


def cheng(x, y):
    """Return the product x * y."""
    return x * y


def chu(x, y):
    """Return the true-division quotient x / y."""
    return x / y


# Dispatch table mapping an operator symbol to its implementation.
operator = {
    '+': jia,
    '-': jian,
    '*': cheng,
    '/': chu,
}

print(operator['+'](3, 2))
|
__author__ = 'local admin'
def recursive_modexp(m, e, n):
    """
    Modular exponentiation by recursive squaring.

    :param m: base
    :param e: non-negative integer exponent
    :param n: modulus (> 0)
    :return: (m ** e) % n, using O(log e) recursive multiplications
    """
    if e == 0:
        return 1
    if e % 2 == 1:
        # Odd exponent: peel off one factor of m.
        return recursive_modexp(m, e-1, n) * m % n
    else:
        # Even exponent: square the half-power result.
        return recursive_modexp(m, e//2, n) ** 2 % n
import random
import easygui
def initialize_game():
    """Set up a round: [guesses allowed, upper bound, secret number]."""
    max_number = 20
    secret = random.randint(1, max_number)
    return [10, max_number, secret]
def play_game(playerName, guessNumber, maxNumber, secretNumber):
    """Run one guessing session of up to guessNumber turns via easygui.

    First guess gets high/low feedback, middle guesses get warmer/colder
    feedback, and the final guess decides win or game over.
    NOTE(review): block nesting reconstructed from context -- confirm
    against the original file's indentation.
    """
    for i in range(guessNumber):
        guessCount = i + 1
        guessLeft = guessNumber - guessCount
        triesTaken = guessNumber - guessLeft
        turns = "turns" if guessLeft != 1 else "turn"
        # Check for valid input: re-prompt until the dialog returns a number.
        validInput = False
        while not validInput:
            guessNum = easygui.integerbox("Choose a number between 1 and " + str(maxNumber) + ":")
            if guessNum is not None:
                validInput = True
        if guessCount == 1:
            # First turn: establish the initial distance and give high/low hints.
            closeNew = abs(guessNum - secretNumber)
            if guessNum < secretNumber:
                easygui.msgbox("Your guess is too low! You have " + str(guessLeft) + " more turns, try again.")
            elif guessNum > secretNumber:
                easygui.msgbox('Your guess is too high! You have ' + str(guessLeft) + ' more turns, try again.')
            else:
                easygui.msgbox("No way, " + playerName + "! You guessed it on the first try! \n")
                break
        elif guessCount < guessNumber:
            # Middle turns: compare distances to say warmer/colder.
            closeOld = closeNew
            closeNew = abs(guessNum - secretNumber)
            warmer: bool = closeNew < closeOld
            colder = closeNew > closeOld
            if closeNew == 0:
                easygui.msgbox("You win " + playerName + f"! It took you {triesTaken} {turns}\n")
                break
            elif warmer:
                easygui.msgbox("You are getting warmer. You have " + str(guessLeft) + " more " + str(turns))
            elif colder:
                easygui.msgbox("You are getting colder. You have " + str(guessLeft) + " more " + str(turns))
        else:
            # Last turn: either an exact hit or the game is over.
            if guessNum == secretNumber:
                easygui.msgbox("You win " + playerName + "! It took you " + str(triesTaken) + " tries.\n")
                break
            else:
                easygui.msgbox('Game over! The real number was ' + str(secretNumber) + '.\n')
def main():
    """Prompt for the player's name, then run one guessing session."""
    player = easygui.enterbox("Welcome. What is your name?")
    guesses, upper_bound, secret = initialize_game()
    play_game(player, guesses, upper_bound, secret)


if __name__ == "__main__":
    main()
|
"""
Copyright 1999 Illinois Institute of Technology
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL ILLINOIS INSTITUTE OF TECHNOLOGY BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Illinois Institute
of Technology shall not be used in advertising or otherwise to promote
the sale, use or other dealings in this Software without prior written
authorization from Illinois Institute of Technology.
"""
import os
from os.path import split, exists, join
import numpy as np
import fabio
from ..ui.pyqt_utils import *
from .hdf5_manager import loadFile
# File extensions (lower-case, no dot) accepted as readable image formats.
input_types = ['adsc', 'cbf', 'edf', 'fit2d', 'mar345', 'marccd', 'hdf5', 'h5', 'pilatus', 'tif', 'tiff', 'smv']
def getFilesAndHdf(dir_path):
    """
    List the image files and .hdf files found in a folder.
    :param dir_path: directory path
    :return: (image file names, hdf file names)
    """
    imgList = []
    hdfList = []
    for entry in os.listdir(dir_path):
        if isImg(fullPath(dir_path, entry)):
            imgList.append(entry)
        elif entry.split('.')[-1] == 'hdf':
            hdfList.append(entry)
    return imgList, hdfList
def getBlankImageAndMask(path):
    """
    Load the blank image and mask stored under *path*/settings, if present.
    :param path: experiment directory containing a 'settings' folder
    :return: (blank image array or None, mask array or None)
    """
    settings_dir = join(path, 'settings')
    mask_file = join(settings_dir, 'mask.tif')
    blank_file = join(settings_dir, 'blank.tif')
    mask = fabio.open(mask_file).data if exists(mask_file) else None
    blank_img = fabio.open(blank_file).data if exists(blank_file) else None
    return blank_img, mask
def getMaskOnly(path):
    """
    Load only the mask image stored under *path*/settings.
    :param path: experiment directory containing a 'settings' folder
    :return: mask array, or None when no maskonly.tif exists
    """
    maskonly_file = join(join(path, 'settings'), 'maskonly.tif')
    if not exists(maskonly_file):
        return None
    return fabio.open(maskonly_file).data
def getImgFiles(fullname, headless=False):
    """
    Get directory, all image file names in the same directory and current file index
    :param fullname: full name of the file including directory i.e. /aaa/bbb/ccc/ddd.tif (str)
    :param headless: when True, suppress the single-image-H5 info dialog
    :return: directory (str), list of image file names, and current index i.e /aaa/bbb/ccc, ["ddd.tif","eee.tif"], 0
             plus the raw file list and the selected file's extension
    """
    dir_path, filename = split(str(fullname)) # split directory and file name from full file name
    dir_path = str(dir_path)
    filename = str(filename)
    _, ext = os.path.splitext(str(filename))
    current = 0
    failedcases = []
    filename_index = None
    if ext == ".txt":
        # A .txt input lists previously failed cases, one file name per line;
        # only those files are returned below.
        for line in open(fullname, "r"):
            failedcases.append(line.rstrip('\n'))
    else:
        failedcases = None
    if ext in ('.hdf5', '.h5'):
        fileList = loadFile(fullname)
        imgList = []
        for f in fileList[0]:
            if failedcases is not None and f not in failedcases:
                continue
            imgList.append(f)
        if len(imgList) == 1 and not headless:
            # if only one image in the h5 file, take all the single h5 images in the folder
            infMsg = QMessageBox()
            infMsg.setText('Single Image H5 File')
            infMsg.setInformativeText("The H5 file selected contains only one image. All the H5 files in the current folder containing only one image will be regrouped the same way as a folder containing TIF files.\n")
            infMsg.setStandardButtons(QMessageBox.Ok)
            infMsg.setIcon(QMessageBox.Information)
            infMsg.exec_()
            # Re-scan the folder, keeping only single-image H5 files.
            list_h5_files = os.listdir(dir_path)
            imgList = []
            fileList = [[],[]]
            for f in list_h5_files:
                _, ext2 = os.path.splitext(str(f))
                full_file_name = fullPath(dir_path, f)
                if ext2 in ('.hdf5', '.h5'):
                    file_loader = loadFile(full_file_name)
                    if len(file_loader[0]) == 1:
                        if failedcases is not None and file_loader[0][0] not in failedcases:
                            continue
                        imgList.append(file_loader[0][0])
                        fileList[0].append(file_loader[0][0])
                        fileList[1].append(file_loader[1][0])
                        if full_file_name == fullname:
                            # Remember which entry corresponds to the selected file.
                            filename_index = file_loader[0][0]
            imgList.sort()
    else:
        # Plain image folder: collect all non-calibration, non-h5 images.
        fileList = os.listdir(dir_path)
        imgList = []
        for f in fileList:
            if failedcases is not None and f not in failedcases:
                continue
            full_file_name = fullPath(dir_path, f)
            _, ext2 = os.path.splitext(str(f))
            if isImg(full_file_name) and f != "calibration.tif" and ext2 not in ('.hdf5', '.h5'):
                imgList.append(f)
        imgList.sort()
    if failedcases is None:
        # Locate the selected file within the sorted list.
        if ext in ('.hdf5', '.h5'):
            if filename_index is None:
                current = 0
            else:
                current = imgList.index(filename_index)
        else:
            current = imgList.index(filename)
    return dir_path, imgList, current, fileList, ext
def fullPath(filePath, fileName):
    """
    Combine a directory and a file name into one path.
    :param filePath: directory (string)
    :param fileName: file name (string)
    :return: the joined path (string)
    """
    return os.path.join(filePath, fileName)
def isImg(fileName):
    """
    Check whether a file name carries a recognised image extension.
    :param fileName: (str)
    :return: True or False
    """
    return fileName.split('.')[-1] in input_types
def isHdf5(fileName):
    """
    Check whether a file name carries an hdf5 extension.
    :param fileName: (str)
    :return: True or False
    """
    return fileName.split('.')[-1] in ('hdf5', 'h5')
def ifHdfReadConvertless(fileName, img):
    """
    If *fileName* is an HDF5 file, adapt *img* so it can be read directly
    without converting to TIFF first.

    :param fileName: source file name (str)
    :param img: image data (array)
    :return: the (possibly converted) image
    """
    if not isHdf5(fileName):
        return img
    converted = img.astype(np.int32)
    # 4294967295 is the uint32 all-ones sentinel; remap it to -1.
    converted[converted == 4294967295] = -1
    return converted
def createFolder(path):
    """
    Create a folder (including any missing parents) if it does not exist.

    :param path: full path of the directory to create
    :return: None
    """
    # exist_ok avoids the check-then-create race of the old
    # ``if not exists(path): os.makedirs(path)`` pattern.
    os.makedirs(path, exist_ok=True)
|
from django.db import models
# Create your models here.
from django.db import models
# Create your models here.
class Base(object):
    """
    Mixin adding create/update timestamp fields to models.

    NOTE(review): this is a plain ``object`` mixin, not a ``models.Model``
    subclass, so Django's abstract-model machinery does not apply to it;
    confirm the timestamp fields actually appear on the subclass tables.
    """
    create_time = models.DateField(auto_now_add=True)
    update_time = models.DateField(auto_now=True)

    class Meta():
        # Marker only: this class itself must never get its own table.
        # (Was ``abstract = False``, contradicting the original comment.)
        abstract = True
# Designed with future extension in mind.
# Category table (rows form a multi-level tree via pid/top_id).
class Cate(models.Model, Base):
    """Product category."""
    name = models.CharField(max_length=50, unique=True)
    pid = models.IntegerField(default=0, null=False)    # parent category id (0 = root)
    type = models.IntegerField(default=1)               # category level/depth
    top_id = models.IntegerField(default=0)             # id of the top-level ancestor
    is_recommend = models.IntegerField(default=1)       # whether to feature on the home page
    # BUGFIX: a CharField default must be a string, not the integer 0.
    pic = models.CharField(max_length=245, default='')  # image path
    class Meta():
        db_table = 'cate'
# Tag table
class Tags(models.Model, Base):
    name = models.CharField(max_length=50, unique=True)
    # Parent category; to_field defaults to the primary key anyway.
    cid = models.ForeignKey(Cate, to_field='id', on_delete=models.CASCADE)
    is_recommend = models.IntegerField(default=1)  # whether to feature on the home page
    class Meta():
        db_table = 'tag'
# Carousel/banner image table
class Banner(Base, models.Model):
    name = models.CharField(max_length=250, unique=True)
    is_show = models.IntegerField(default=1)  # whether to show on the home page
    sort = models.IntegerField(default=1)     # display order of the image
    type = models.IntegerField(default=0)     # banner type
    class Meta():
        db_table = 'banner'
# News table
class News(Base, models.Model):
    title = models.CharField(max_length=200, default='')
    is_recommend = models.IntegerField(default=0)  # whether to feature on the home page
    content = models.TextField()
    class Meta():
        db_table = 'news'
# status = models.IntegerField(default=0)
# reason = models.CharField(max_length=255)
#
#
# # 用户表
#
# class User(Base, models.Model):
# username = models.CharField(max_length=50)
# password = models.CharField(max_length=50)
# phone = models.CharField(max_length=11)
# email = models.CharField(max_length=50)
# image = models.CharField(max_length=255, default='')
# # 签名...
# is_alive = models.IntegerField(default=0)
#
# # Product table (unfinished)
# Add a review/comment count field.
# This backs a high-traffic page, so avoid hitting the database on every request.
# A single image can live in one table;
# multiple images need a separate image table (use a transaction).
class Goods(Base, models.Model):
    '''Product table; comment count and sales figures depend on other tables.'''
    name = models.CharField(max_length=50)
    desc = models.CharField(max_length=255, default='')
    price = models.DecimalField(max_digits=64, decimal_places=2, default=999999)
    pic = models.CharField(max_length=250, default='', null=False)
    stock = models.IntegerField(default=0, null=False)  # stock on hand
    # Reserved (locked) stock
    lock_stock = models.IntegerField(default=0, null=False)
    is_recommend = models.IntegerField(default=1)  # whether to feature on the home page
    # NOTE(review): default=0 on a ForeignKey — confirm a Cate/Tags row with pk 0 exists.
    cid = models.ForeignKey('Cate', on_delete=models.CASCADE, default=0)
    tid = models.ForeignKey('Tags', on_delete=models.CASCADE, default=0)
    top_id = models.IntegerField(default=0)  # top-level category id
    comment_time = models.IntegerField(default=0)  # comment count
    sales = models.IntegerField(default=0)  # sales count
    class Meta():
        db_table = 'goods'
class Role(models.Model):
    """Access-control role; referenced by User and mapped to Resource via Connect."""
    name = models.CharField(max_length=50, unique=True)
    status = models.IntegerField(default=1)
    class Meta():
        db_table = 'role'
class User(models.Model):
    """Account record carrying a role reference."""
    username = models.CharField(max_length=100)
    # NOTE(review): plain CharField — confirm this stores a hash, not plaintext.
    password = models.CharField(max_length=255)
    is_admin = models.IntegerField(default=0)
    # NOTE(review): a ForeignKey named ``role_id`` produces a ``role_id_id``
    # DB column in Django — confirm that is intended.
    role_id = models.ForeignKey(Role, on_delete=models.CASCADE)
    class Meta():
        db_table = 'user'
class Resource(models.Model):
    """A protected resource (name + URL) granted to roles via Connect."""
    name = models.CharField(max_length=50)
    url = models.CharField(max_length=255, default='')
    status = models.IntegerField(default=0)
    class Meta():
        db_table = 'resource'
class Connect(models.Model):
    """Role-to-resource association table."""
    role_id = models.ForeignKey(Role, on_delete=models.CASCADE, null=True)
    resource_id = models.ForeignKey(Resource, on_delete=models.CASCADE)
    class Meta():
        db_table = 'connect'
|
import pickle
from pathlib import Path
from collections import defaultdict
import numpy as np
from second.core import box_np_ops
from second.data.dataset import get_dataset_class
from second.utils.progress_bar import progress_bar_iter as prog_bar
def create_groundtruth_database(dataset_class_name,
                                data_path,
                                info_path=None,
                                used_classes=None,
                                database_save_path=None,
                                db_info_save_path=None,
                                relative_path=True,
                                add_rgb=False,
                                lidar_only=False,
                                bev_only=False,
                                coors_range=None):
    """Build a ground-truth object database for augmentation.

    For every annotated box of every sample, the lidar points inside the box
    are cropped, translated into the box frame and written to an individual
    ``.bin`` file; a per-class index of those crops is pickled.

    :param dataset_class_name: registered dataset name for get_dataset_class
    :param data_path: dataset root directory
    :param info_path: optional path to the dataset info pickle
    :param used_classes: optional whitelist of class names to keep
    :param database_save_path: crop folder (default: <root>/gt_database)
    :param db_info_save_path: index pickle (default: <root>/dbinfos_train.pkl)
    :param relative_path: store crop paths relative to the database folder
    :param add_rgb: unused, kept for API compatibility
    :param lidar_only: unused, kept for API compatibility
    :param bev_only: unused, kept for API compatibility
    :param coors_range: unused, kept for API compatibility
    """
    dataset = get_dataset_class(dataset_class_name)(
        info_path=info_path,
        root_path=data_path,
    )
    root_path = Path(data_path)
    if database_save_path is None:
        database_save_path = root_path / 'gt_database'
    else:
        database_save_path = Path(database_save_path)
    # There was kitti in the file name
    if db_info_save_path is None:
        db_info_save_path = root_path / "dbinfos_train.pkl"
    database_save_path.mkdir(parents=True, exist_ok=True)
    all_db_infos = {}
    group_counter = 0
    start = 0
    for j in prog_bar(list(range(start, len(dataset)))):
        image_idx = j
        sensor_data = dataset.get_sensor_data(j)
        # Not the case with nuScenes or Lyft data
        if "image_idx" in sensor_data["metadata"]:
            image_idx = sensor_data["metadata"]["image_idx"]
        # Get GT data
        points = sensor_data["lidar"]["points"]
        annos = sensor_data["lidar"]["annotations"]
        gt_boxes = annos["boxes"]
        names = annos["names"]
        # Generate groups
        group_dict = {}
        group_ids = np.full([gt_boxes.shape[0]], -1, dtype=np.int64)
        # Not in case of lyft or nuScenes
        if "group_ids" in annos:
            group_ids = annos["group_ids"]
        else:
            # set group ids to number of boxes
            group_ids = np.arange(gt_boxes.shape[0], dtype=np.int64)
        # Get difficulty (not present for lyft or nuScenes)
        difficulty = np.zeros(gt_boxes.shape[0], dtype=np.int32)
        if "difficulty" in annos:
            difficulty = annos["difficulty"]
        # Count of objects and per-box membership mask of the points
        num_obj = gt_boxes.shape[0]
        point_indices = box_np_ops.points_in_rbbox(points, gt_boxes)
        # For each object in the sample
        for i in range(num_obj):
            # Save the point cloud of each object separately.
            filename = f"{image_idx}_{names[i]}_{i}.bin"
            filepath = database_save_path / filename
            gt_points = points[point_indices[:, i]]
            # Translate points into the box's local frame.
            gt_points[:, :3] -= gt_boxes[i, :3]
            # BUGFIX: must open in binary mode ('wb'); ndarray.tofile writes
            # raw bytes and a text-mode handle corrupts them on platforms
            # that translate newlines.
            with open(filepath, 'wb') as f:
                gt_points.tofile(f)
            # Keep only whitelisted classes (None = keep everything)
            if (used_classes is None) or names[i] in used_classes:
                if relative_path:
                    db_path = str(database_save_path.stem + "/" + filename)
                else:
                    db_path = str(filepath)
                db_info = {
                    "name": names[i],
                    "path": db_path,
                    "image_idx": image_idx,
                    "gt_idx": i,
                    "box3d_lidar": gt_boxes[i],
                    "num_points_in_gt": gt_points.shape[0],
                    "difficulty": difficulty[i],
                }
                local_group_id = group_ids[i]
                # Count objects per group; for lyft/nuScenes each object is
                # its own group, so every group has exactly one member.
                if local_group_id not in group_dict:
                    group_dict[local_group_id] = group_counter
                    group_counter += 1
                db_info["group_id"] = group_dict[local_group_id]
                # "score" is absent for lyft/nuScenes
                if "score" in annos:
                    db_info["score"] = annos["score"][i]
                if names[i] in all_db_infos:
                    all_db_infos[names[i]].append(db_info)
                else:
                    all_db_infos[names[i]] = [db_info]
    for k, v in all_db_infos.items():
        print(f"load {len(v)} {k} database infos")
    with open(db_info_save_path, 'wb') as f:
        pickle.dump(all_db_infos, f)
def create_groundtruth_database_parallel(dataset_class_name,
                                         data_path,
                                         info_path=None,
                                         used_classes=None,
                                         database_save_path=None,
                                         db_info_save_path=None,
                                         relative_path=True,
                                         add_rgb=False,
                                         lidar_only=False,
                                         bev_only=False,
                                         coors_range=None):
    """Parallel (ray-based) variant of :func:`create_groundtruth_database`.

    Samples are processed by ray workers; group ids are assigned afterwards
    so every object ends up in its own group (matching the lyft/nuScenes
    behaviour of the serial path).

    Parameters are identical to create_groundtruth_database; add_rgb,
    lidar_only, bev_only and coors_range are unused and kept for API
    compatibility.
    """
    import ray
    dataset = get_dataset_class(dataset_class_name)(
        info_path=info_path,
        root_path=data_path,
    )
    root_path = Path(data_path)
    if database_save_path is None:
        database_save_path = root_path / 'gt_database'
    else:
        database_save_path = Path(database_save_path)
    # There was kitti in the file name
    if db_info_save_path is None:
        db_info_save_path = root_path / "dbinfos_train.pkl"
    database_save_path.mkdir(parents=True, exist_ok=True)
    all_db_infos = defaultdict(list)

    @ray.remote
    def get_all_objects(j):
        # Crop and save every annotated object of sample j; return the
        # db_info dicts for the whitelisted ones.
        image_idx = j
        sensor_data = dataset.get_sensor_data(j)
        # Not the case with nuScenes or Lyft data
        if "image_idx" in sensor_data["metadata"]:
            image_idx = sensor_data["metadata"]["image_idx"]
        # Get GT data
        points = sensor_data["lidar"]["points"]
        annos = sensor_data["lidar"]["annotations"]
        gt_boxes = annos["boxes"]
        names = annos["names"]
        # Get difficulty (not present for lyft or nuScenes)
        difficulty = np.zeros(gt_boxes.shape[0], dtype=np.int32)
        if "difficulty" in annos:
            difficulty = annos["difficulty"]
        # Count of objects and per-box membership mask of the points
        num_obj = gt_boxes.shape[0]
        point_indices = box_np_ops.points_in_rbbox(points, gt_boxes)
        object_instances = list()
        # For each object in the sample
        for i in range(num_obj):
            # Save the point cloud of each object separately.
            filename = f"{image_idx}_{names[i]}_{i}.bin"
            filepath = database_save_path / filename
            gt_points = points[point_indices[:, i]]
            # Translate points into the box's local frame.
            gt_points[:, :3] -= gt_boxes[i, :3]
            # BUGFIX: must open in binary mode ('wb'); ndarray.tofile
            # writes raw bytes.
            with open(filepath, 'wb') as f:
                gt_points.tofile(f)
            # Keep only whitelisted classes (None = keep everything)
            if (used_classes is None) or names[i] in used_classes:
                if relative_path:
                    db_path = str(database_save_path.stem + "/" + filename)
                else:
                    db_path = str(filepath)
                db_info = {
                    "name": names[i],
                    "path": db_path,
                    "image_idx": image_idx,
                    "gt_idx": i,
                    "box3d_lidar": gt_boxes[i],
                    "num_points_in_gt": gt_points.shape[0],
                    "difficulty": difficulty[i],
                }
                # "score" is absent for lyft/nuScenes
                if "score" in annos:
                    db_info["score"] = annos["score"][i]
                object_instances.append(db_info)
        return object_instances

    ray.init()
    # (typo fixed: was all_object_instanctes)
    all_object_instances = [
        get_all_objects.remote(idx) for idx in range(len(dataset))]
    collected_objects = ray.get(all_object_instances)
    # Assign a globally unique group id to every object; effectively a
    # global instance counter rather than a true grouping.
    group_counter = 0
    for per_sample_objects in collected_objects:
        for object_ in per_sample_objects:
            object_['group_id'] = group_counter
            all_db_infos[object_['name']].append(object_)
            group_counter += 1
    for k, v in all_db_infos.items():
        print(f"load {len(v)} {k} database infos")
    with open(db_info_save_path, 'wb') as f:
        pickle.dump(all_db_infos, f)
|
from . import general
|
#!/usr/bin/python3
import cgi
import os
import cgitb
from time import time, localtime, strftime
import datetime
import calendar
cgitb.enable()
# Human-readable timestamp of the current request,
# e.g. "Mon, Jan 01 2024 12:00:00".
clock=strftime("%a, %b %d %Y %H:%M:%S", localtime())
def index():
    """Emit the CGI response header for the default page."""
    print('content-type: text/html')
    # BUGFIX: CGI/HTTP requires an empty line to terminate the header
    # block; without it the HTML body would be parsed as headers.
    print()


index()
def showForm1():
    """Render the SVInterpreter input form to stdout as HTML.

    The form posts to ``svinterpreter_aux/results_fantomV2.py`` and gathers:
    genome version, reference Hi-C cell line, optional HPO terms and
    inheritance mode, the structural-variant type with its coordinates, and
    the database-overlap options. Visibility of the conditional sections is
    driven by /JS_scriptV2.js; the TAD range selector uses noUiSlider.
    """
    # NOTE: the HTML below is emitted verbatim as the page body.
    print ("""
<html>
<head>
<link rel="stylesheet" href="/test.css">
<script src="/JS_scriptV2.js"></script>
<link href="/nouislider.min.css" rel="stylesheet">
</head>
<div class="container">
<form id="regForm" method=POST action="svinterpreter_aux/results_fantomV2.py">
<script src="/nouislider.min.js"></script>
<h1>Structural Variant Interpreter - SVInterpreter</h1>
<a4>This tool was developed to support prediction of the phenotypic outcome of chromosomal or genomic structural variants (unbalanced and balanced translocations, inversion, insertion, deletions or duplications).</a4>
<p></p>
<a3> Please fill the following form with all the information about the structural variant to be analysed and respective phenotypic characteristics (optional). A table with relevant information for the evaluation of the structural variant will be retrived. </a3>
<div class="row">
<h4><center>Reference Human Genome (version)</h4>
<div class="input-group">
<select name="version">
<option value="B" class="default" >Select Genome Version</option>
<option value="hg19" ><b>Hg19</b></option>
<option value="hg38" ><b>Hg38</b></option>
</select>
</div>
</div>
<div class="row">
<div class="input-group">
<h4><center>Cell line Hi-C data to use as reference</h4>
<a3><center>This data will be used to define the Topological Associated domains (TADs) boundaries and chromatin loops.</center></a3>
<a3><center>All data was retrived from <a href="http://3dgenome.fsm.northwestern.edu/publications.html">YUE Lab website.</a></center></a3>
<p></p>
<select name="tad">
<option value="B" class="default" >Select Cell-line</option>
<option value="consensus">Consensus TADs (Lifei 2019)</option>
<option value="IMR90">IMR90 (Rao 2014)</option>
<option value="LCL">LCL (Rao 2014)</option>
<option value="hesc">hESC (Dixon 2015)</option>
<option value="a549">A549 (Encode 2016)</option>
<option value="aorta">Aorta (Leung 2015)</option>
<option value="cortex">Cortex (Schmitt 2016)</option>
<option value="blader">Bladder (Schmitt 2016))</option>
<option value="lung">Lung (Schmitt 2016)</option>
<option value="huvec">HUVEC (Rao 2014)</option>
<option value="k562">K562 (Rao 2014)</option>
</select>
</div>
</div>
<div class="row">
<p><h4><center> Phenotypic description using HPO (optional)</h4></p>
<div class="input-group">
<a3><center>The terms are separated by commas.</center></a3>
<p></p>
<input type="text" placeholder="HP:0000202, HP:0000157, HP:0006483, HP:0001640, HP:0001961,..." name="hpo_des"/>
</div>
</div>
<div class="row">
<h4><center> Highlighted Inheritance (optional)</h4></center>
<a3><center>All phenotypes are analyzed and presented, but only the ones with the user-selected inheritance are highlighted on the output.</center></a3>
<p></p>
<div class="input-group">
<select name="inh">
<option value="B" class="default" >Select Inheritance</option>
<option value="AD">Autossomal Dominant (AD)</option>
<option value="AR">Autossomal Recessive (AR)</option>
<option value="PD">Pseudoautosomal Dominant (PD)</option>
<option value="PR">Pseudoautosomal Recessive (PR)</option>
<option value="DD">Digenic Dominant (DD)</option>
<option value="DR">Digenic Recessive(DR)</option>
<option value="IC">Isolated Cases (IC)</option>
<option value="ICB">Inherited chromosomal imbalance (ICB)</option>
<option value="Mu">Multifactorial(Mu)</option>
<option value="SMo">Somatic mosaicism (SMo)</option>
<option value="SMu">Somatic mutation (SMu)</option>
<option value="XL">X-linked (XL)</option>
<option value="XLD">X-linked Dominant (XLD)</option>
<option value="XLR">X-linked Recessive (XLR)</option>
<option value="YL">Y-linked (YL)</option>
</select>
</div>
</div>
<div class="row">
<div class="input-group">
<h4><center> Type of structural variant</h4>
<select id="seltest", name="tt", onchange="yesnoCheck(tt);">
<option value="B" class="default" >Select Structural variant</option>
<option value="Balanced_translocation">Balanced Translocation</option>
<option value="Unbalanced_translocation">Unbalanced Translocation</option>
<option value="Inversion">Inversion</option>
<option value="Deletion">Deletion</option>
<option value="Duplication">Duplication</option>
<option value="Insertion">Insertion</option>
<option value="Spec_Rg">Query Genomic region</option>
</select>
</div>
</div>
<div class="row">
<div class="ifYes", id="ifYes">
<a3>This tools accepts coordinates or intervals, with or without commas.</a3>
<p></p>
<a4>Chromosome A</a4>
<input id="chrA" type="text" class="bal" placeholder="6" name="chrA"/>
<p></p>
<a4>Chromosome B</a4>
<input id="chrB" type="text" class="bal" placeholder="7" name="chrB"/>
<p></p>
<a4>Breakpoint A</a4>
<input id="brA" type="text" class="bal" placeholder="168,529,498" name="brA"/>
<p></p>
<a4>Breakpoint B</a4>
<input id="brB" type="text" class="bal" placeholder="168,529,498" name="brB"/>
</div>
</div>
<div class="row">
<div class="ifYes", id="ifno">
<a3>This tools accepts coordinates or intervals, with or without commas.</a3>
<p></p>
<a4>Chromosome </a4>
<input id="chrA" type="text" class="bal" placeholder="6" name="chrA"/>
<p></p>
<a4>Region (start-end)</a4>
<input id="brA" type="text" class="bal" placeholder="168,529,498-168,540,877" name="brA"/>
</div>
</div>
<div class="row">
<div class="ifYes", id="ifdd">
<a3>This tools accepts coordinates or intervals, with or without commas.</a3>
<p></p>
<a4>Chromosome </a4>
<input id="chrA" type="text" class="bal" placeholder="6" name="chrA"/>
<p></p>
<a4>Region (start-end)</a4>
<input id="brA" type="text" class="bal" placeholder="168,529,498-168,540,877" name="brA"/>
<p></p>
<input class="checkbox_input" type="checkbox" name="coupon_question" value="1" id="isov">
<label for="isov">Overlap with public Databases?</label>
</div>
</div>
<div class="row">
<div class="ifYes", id="typeofanaly">
<h4><center> Analysis based on</h4></center>
<select name="tttype" onchange="shohide(this)">
<option value="B" class="default" >Select strategy </option>
<option value="ggg">TADs</option>
<option value="spec">A specific region</option>
</select>
</div>
</div>
<div class="row">
<div class="ifYes" id="formTADs">
<h4><center> TADs to analyse</h4></center>
<a3><center>Use the sliders to define the TADs to analyse. By default, only the breakpoint TAD (brTAD) is analysed.</center></a3>
<p></p>
<script src="nouislider.min.js"></script>
<div id="slider">
</div>
<input type="hidden" name="slider_control" id="slider_control" value="" />
<p> </p>
<script>
var slider = document.getElementById('slider');
noUiSlider.create(slider, {
start: [-0.3, 0.3],
connect: true,
range: {
'min': -5,
'max': 5
},
pips: {
mode: 'positions',
stepped: true,
density: 10,
values: [0,10,20,30,40,50,60,70,80,90,100],
format: {
to: function(value) {
if (value<0){
return ["TAD-1","TAD-2","TAD-3", "TAD-4", "TAD-5"][(value*-1)-1];
}else{
return ["brTAD", "TAD+1", "TAD+2", "TAD+3", "TAD+4", "TAD+5"][value];
}
},
from: Number
}
}
});
var slider_control = document.getElementById('slider_control');
slider.noUiSlider.on('update', function( values) {
slider_control.value = values.join(' - ');
});
</script>
</div>
</div>
<div class="row">
<div class="ifYes" id="dvPassport">
<h4>Insert the region to analyse</h4>
<a3>The Region must follow the chr:start-end format.</a3>
<a3>Make sure to insert a region that includes the variant breakpoint.</a3>
<p></p>
<input type="text" placeholder="chr5:23345543-23357777" name="specreg"/>
</div>
</div>
<div class="row">
<div class="ifYes" id="dvPassport2">
<h4>Insert the regions to analyse</h4>
<a3>The Regions must follow the chr:start-end format.</a3>
<a3>Insert the regions corresponding to each chromossome of the rearrangement.</a3>
<a3>Make sure to insert regions that include the variant breakpoints.</a3>
<p></p>
<a4>Chromosome A region </a4>
<input type="text" placeholder="chr5:23345543-23357777" name="specreg1"/>
<a4>Chromosome B region </a4>
<input type="text" placeholder="chr7:44345543-55777766" name="specreg2"/>
</div>
</div>
<div class="row">
<div style="display: none;" id="ifoverlap">
<h4><center>Databases to used on the overlap search</center></h4>
<input type="checkbox" name="dats[]" value="DGV" id="dgv" checked />
<label style="word-wrap:break-word" for="dgv">DGV database</label>
<p><input type="checkbox" name="dats[]" value="1000Genomes" id="1000genomes" checked/>
<label style="word-wrap:break-word" for="1000genomes">1000 Genomes database</label>
<p><input type="checkbox" name="dats[]" value="ClinGen" id="clingen" checked/>
<label style="word-wrap:break-word" for="clingen">ClinGen/ClinVar databases</label>
<p><input type="checkbox" name="dats[]" value="deldupsindrome" id="deldupsindrome" checked/>
<label style="word-wrap:break-word" for="deldupsindrome">Deletion/Duplication syndromes</label>
<p><input type="checkbox" name="dats[]" value="CoeCoop" id="coecoop" checked/>
<label style="word-wrap:break-word" for="coecoop">DDelay studies (Cooper et al. 2011; Coe et al. 2012)</label>
<p><input type="checkbox" name="dats[]" value="collins" id="collins" checked/>
<label style="word-wrap:break-word" for="collins">Collins et al. 2017</label>
<p><input type="checkbox" name="dats[]" value="chaisson" id="chaisson" checked/>
<label style="word-wrap:break-word" for="chaisson">Chaisson et al. 2019</label>
<p><input type="checkbox" name="dats[]" value="gnomad" id="gnomad" checked/>
<label style="word-wrap:break-word" for="gnomad">Gnomad SV database</label>
<p> </p>
<h4><center>Type of overlap search:</center></h4>
<select id="ovl" name="ovl">
<option value="B" class="default" >Selection strategy</option>
<option value="mutual">Mutual Overlap</option>
<option value="full">Query comprised by the reference</option>
</select>
</div>
</div>
<div class="row">
<div style="display: none;" id="ifmutual">
<h4> <center> Mutual Overlap </center></h4>
<a3>Choosing Mutual Overlap, this tool will apply a mutual overlap cut-off on the search:</a3>
<a3> Database hits are only retrieved if percentages of overlap of query vs database and database vs query are above the cut-off.</a3>
<a3><p><b>Overlap cutoff (1-100)%:</b></p></a3>
<input type="text" name="perc" value="70">
</div>
</div>
<div class="row">
<div style="display: none;" id="iffull">
<h4> <center> Query comprised by the reference </center></h4>
<a3> Choosing query comprised by the reference, this tool will retrieve all database hits that covers 100% of the query, independently of the database entry size.<a3>
</div>
</div>
<div class="row">
<div class="ifYes", id="ifins">
<a3>This tools accepts coordinates or intervals, with or without commas.</a3>
<p></p>
<a4>Recipient Chromosome </a4>
<input id="chrA" type="text" class="bal" placeholder="6" name="chrA"/>
<p></p>
<a4>Donor Chromosome </a4>
<input id="chrB" type="text" class="bal" placeholder="8" name="chrB"/>
<p></p>
<a4>Recipient Breakpoint</a4>
<input id="brA" type="text" class="bal" placeholder="116812107-116912603" name="brA"/>
<p></p>
<a4>Inserted region</a4>
<input id="brB" type="text" class="bal" placeholder="168,539,498" name="brB"/>
</div>
</div>
<div class="row">
<div class="ifYes", id="ifmeh">
<a3>This tools accepts coordinates or intervals, with or without commas.</a3>
<p></p>
<a4>Chromosome A </a4>
<input id="chrA" type="text" class="bal" placeholder="6" name="chrA"/>
<p></p>
<a4>Breakpoint A</a4>
<input id="brA" type="text" class="bal" placeholder="116812107-116912603" name="brA"/>
<p></p>
<input type="radio" name="vvv" value="del" id="del1"/>
<label for="del1">Deletion</label>
<input type="radio" name="vvv" value="dup" id="dup1"/>
<label for="dup1">Duplication</label>
<p></p>
<a4>Chromosome B </a4>
<input id="chrB" type="text" class="bal" placeholder="8" name="chrB"/>
<p></p>
<a4>Breakpoint B</a4>
<input id="brB" type="text" class="bal" placeholder="16812107-16912603" name="brB"/>
<p></p>
<input type="radio" name="ddd" value="del" id="del"/>
<label for="del">Deletion</label>
<input type="radio" name="ddd" value="dup" id="dup"/>
<label for="dup">Duplication</label>
</div>
</div>
<p><input type="submit" value="Submit"></b></p></center>
</form>
<p><button onClick="window.location.href=window.location.href">Clean Form</button></p>
</div>
<p></p>
<a3><center>If you using this tool please acknowledge it by citing <a href="https://link.springer.com/article/10.1007/s00439-020-02121-x">our reference publication</a></center>
<center><address>
Correspondance: <a href="mailto:doencasgenomicas@insa.min-saude.pt">Genomic Diseases Group</a>
</address>
<center><aaa><a href="http://www.insa.min-saude.pt/category/areas-de-atuacao/genetica-humana/">Department of Human Genetics</a></aaa></center>
<p>National Institute of Health Doutor Ricardo Jorge</p> </aaa></center>
<center><img src="https://cld.pt/dl/download/bf231ea4-336c-47c2-98a9-5129c3af3510/aaa.png" width=500 height=80 border=0 alt=""><br><br />
<center><p><rodape>This file was last modified 28/12/2020</p></font></a3>
</html>
""")


showForm1()
#<input type="reset">
#<p><button onClick="window.location.href=window.location.href">Clean Form</button></p>
|
from __future__ import print_function, absolute_import
import logging
import re
import json
import requests
import uuid
import time
import os
import argparse
import uuid
import datetime
import socket
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.io.filesystems import FileSystems
from apache_beam.metrics import Metrics
from apache_beam.metrics.metric import MetricsFilter
from apache_beam import pvalue
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
# BigQuery schema for the Hermeco.Hana table: one STRING column per field of
# the semicolon-separated source CSV, plus the generated IDKEY (row uuid) and
# FECHA (load date) columns. Consumed by beam.io.WriteToBigQuery in run().
TABLE_SCHEMA = (
    'IDKEY:STRING, '
    'FECHA:STRING, '
    'NRO__PED__SAP:STRING, '
    'NRO__PED__TV:STRING, '
    'VIA_DE_PAGO:STRING, '
    'C__CLASE_PEDIDO:STRING, '
    'N__CLASE_PEDIDO:STRING, '
    'ESTADO:STRING, '
    'NIVEL_SERVICIO:STRING, '
    'FECHA_TV:STRING, '
    'FECHA_LIBERACION:STRING, '
    'FECHA_INTEGRACION:STRING, '
    'HORA_INTEGRACION:STRING, '
    'HORA:STRING, '
    'FECHA_FACTURA:STRING, '
    'F_VENCIMIENTO_CREDITO:STRING, '
    'CLASE_DE_CONDICION:STRING, '
    'NRO__ENTREGA:STRING, '
    'NRO__FACTURA:STRING, '
    'DOCTO__CLIENTE:STRING, '
    'NRO__CLIENTE:STRING, '
    'NOMBRE_CLIENTE:STRING, '
    'TELEFONO:STRING, '
    'EMAIL_CLIENTE:STRING, '
    'DIRECCION_CLIENTE:STRING, '
    'CIUDAD:STRING, '
    'DEPARTAMENTO:STRING, '
    'C__PEDIDO:STRING, '
    'C__OR__ENTREGA:STRING, '
    'C__DIF__ENTREGA:STRING, '
    'C__F__ENTREGA:STRING, '
    'C__DIF__PICKING:STRING, '
    'C__FACTURA:STRING, '
    'C__NO_ENVIADA:STRING, '
    'VLR__PEDIDO:STRING, '
    'VLR__PAYU:STRING, '
    'VLR__CREDITO_CLIENTE:STRING, '
    'CREDITO_CONTAB_:STRING, '
    'CREDITO_COMPENSADO:STRING, '
    'CREDITO_FALTANTE:STRING, '
    'VLR__COD:STRING, '
    'VLR__BONO:STRING, '
    'VLR__TIENDA:STRING, '
    'VLR__TOTAL:STRING, '
    'VLR__FACTURA:STRING, '
    'VLR__COSTO:STRING, '
    'VLR__UTILIDAD:STRING, '
    'MARGEN:STRING, '
    'VLR__DEBITOS:STRING, '
    'VLR__CREDITOS:STRING, '
    'VLR__SALDO_PEDIDO:STRING, '
    'ESTADO_CARTERA_PEDIDO:STRING, '
    'ESTADO_CARTERA_CLIENTE:STRING, '
    'VLR__SALDO_CLIENTE:STRING, '
    'NRO__GUIA:STRING, '
    'ESTADO_DESPACHO:STRING, '
    'FECHA_DESPACHO:STRING, '
    'TRASPORTADORA:STRING, '
    'CICLO_CAD:STRING, '
    'CICLO_TOTAL:STRING, '
    'EMAIL_ASESOR:STRING, '
    'NOMBRE_ASESOR:STRING, '
    'RECAUDADOR:STRING, '
    'CONTABILIZACION_FACTURA:STRING, '
    'OBSERVACIONES:STRING, '
    'FECHA_DE_PAGO:STRING '
)
# ?
class formatearData(beam.DoFn):
    """Beam DoFn that turns one ';'-separated CSV line into a BigQuery row dict.

    Each output row gets a fresh uuid4 under 'idkey' and the load date given
    at construction time under 'fecha'; the remaining 64 columns are taken
    positionally from the input line.

    NOTE(review): the generated keys 'idkey'/'fecha' are lower-case while
    TABLE_SCHEMA declares IDKEY/FECHA — confirm the BigQuery sink matches
    them case-insensitively.
    """

    def __init__(self, mifecha):
        # mifecha: load-date string stamped onto every row.
        super(formatearData, self).__init__()
        self.mifecha = mifecha

    def process(self, element):
        """Split *element* on ';' and map the fields positionally (0..63)."""
        # print(element)
        arrayCSV = element.split(';')
        tupla= {'idkey' : str(uuid.uuid4()),
                # 'fecha' : datetime.datetime.today().strftime('%Y-%m-%d'),
                'fecha': self.mifecha,
                'NRO__PED__SAP' : arrayCSV[0],
                'NRO__PED__TV' : arrayCSV[1],
                'VIA_DE_PAGO' : arrayCSV[2],
                'C__CLASE_PEDIDO' : arrayCSV[3],
                'N__CLASE_PEDIDO' : arrayCSV[4],
                'ESTADO' : arrayCSV[5],
                'NIVEL_SERVICIO' : arrayCSV[6],
                'FECHA_TV' : arrayCSV[7],
                'FECHA_LIBERACION' : arrayCSV[8],
                'FECHA_INTEGRACION' : arrayCSV[9],
                'HORA_INTEGRACION' : arrayCSV[10],
                'HORA' : arrayCSV[11],
                'FECHA_FACTURA' : arrayCSV[12],
                'F_VENCIMIENTO_CREDITO' : arrayCSV[13],
                'CLASE_DE_CONDICION' : arrayCSV[14],
                'NRO__ENTREGA' : arrayCSV[15],
                'NRO__FACTURA' : arrayCSV[16],
                'DOCTO__CLIENTE' : arrayCSV[17],
                'NRO__CLIENTE' : arrayCSV[18],
                'NOMBRE_CLIENTE' : arrayCSV[19],
                'TELEFONO' : arrayCSV[20],
                'EMAIL_CLIENTE' : arrayCSV[21],
                'DIRECCION_CLIENTE' : arrayCSV[22],
                'CIUDAD' : arrayCSV[23],
                'DEPARTAMENTO' : arrayCSV[24],
                'C__PEDIDO' : arrayCSV[25],
                'C__OR__ENTREGA' : arrayCSV[26],
                'C__DIF__ENTREGA' : arrayCSV[27],
                'C__F__ENTREGA' : arrayCSV[28],
                'C__DIF__PICKING' : arrayCSV[29],
                'C__FACTURA' : arrayCSV[30],
                'C__NO_ENVIADA' : arrayCSV[31],
                'VLR__PEDIDO' : arrayCSV[32],
                'VLR__PAYU' : arrayCSV[33],
                'VLR__CREDITO_CLIENTE' : arrayCSV[34],
                'CREDITO_CONTAB_' : arrayCSV[35],
                'CREDITO_COMPENSADO' : arrayCSV[36],
                'CREDITO_FALTANTE' : arrayCSV[37],
                'VLR__COD' : arrayCSV[38],
                'VLR__BONO' : arrayCSV[39],
                'VLR__TIENDA' : arrayCSV[40],
                'VLR__TOTAL' : arrayCSV[41],
                'VLR__FACTURA' : arrayCSV[42],
                'VLR__COSTO' : arrayCSV[43],
                'VLR__UTILIDAD' : arrayCSV[44],
                'MARGEN' : arrayCSV[45],
                'VLR__DEBITOS' : arrayCSV[46],
                'VLR__CREDITOS' : arrayCSV[47],
                'VLR__SALDO_PEDIDO' : arrayCSV[48],
                'ESTADO_CARTERA_PEDIDO' : arrayCSV[49],
                'ESTADO_CARTERA_CLIENTE' : arrayCSV[50],
                'VLR__SALDO_CLIENTE' : arrayCSV[51],
                'NRO__GUIA' : arrayCSV[52],
                'ESTADO_DESPACHO' : arrayCSV[53],
                'FECHA_DESPACHO' : arrayCSV[54],
                'TRASPORTADORA' : arrayCSV[55],
                'CICLO_CAD' : arrayCSV[56],
                'CICLO_TOTAL' : arrayCSV[57],
                'EMAIL_ASESOR' : arrayCSV[58],
                'NOMBRE_ASESOR' : arrayCSV[59],
                'RECAUDADOR' : arrayCSV[60],
                'CONTABILIZACION_FACTURA' : arrayCSV[61],
                'OBSERVACIONES' : arrayCSV[62],
                'FECHA_DE_PAGO' : arrayCSV[63]
                }
        # Beam expects an iterable of output elements.
        return [tupla]
def run(archivo, mifecha):
    """Build and launch the Beam pipeline loading *archivo* into BigQuery.

    :param archivo: GCS path of the ';'-separated CSV to ingest
    :param mifecha: load-date string stamped onto every row
    :return: fixed status string once the pipeline has been started
    """
    gcs_path = "gs://ct-sensus"  # bucket root
    gcs_project = "contento-bi"
    # DirectRunner locally; DataflowRunner when running on the "contentobi" host.
    mi_runer = ("DirectRunner", "DataflowRunner")[socket.gethostname()=="contentobi"]
    pipeline = beam.Pipeline(runner=mi_runer, argv=[
        "--project", gcs_project,
        "--staging_location", ("%s/dataflow_files/staging_location" % gcs_path),
        "--temp_location", ("%s/dataflow_files/temp" % gcs_path),
        "--output", ("%s/dataflow_files/output" % gcs_path),
        "--setup_file", "./setup.py",
        "--max_num_workers", "5",
        "--subnetwork", "https://www.googleapis.com/compute/v1/projects/contento-bi/regions/us-central1/subnetworks/contento-subnet1"
        # "--num_workers", "30",
        # "--autoscaling_algorithm", "NONE"
    ])
    # First line of the CSV is a header, hence skip_header_lines=1.
    lines = pipeline | 'Lectura de Archivo' >> ReadFromText(archivo, skip_header_lines=1)
    transformed = (lines | 'Formatear Data' >> beam.ParDo(formatearData(mifecha)))
    # Rows are appended to the existing table; the table is created on first run.
    transformed | 'Escritura a BigQuery Hana Hermeco' >> beam.io.WriteToBigQuery(
        gcs_project + ":Hermeco.Hana",
        schema=TABLE_SCHEMA,
        create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED,
        write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND
    )
    jobObject = pipeline.run()
    # jobID = jobObject.job_id()
    return ("Corrio Full HD")
'''
Baseline outlier-detection methods:
 - various LOF-based methods
 - isolation forest
 - DBSCAN
 - L2
 - elliptic envelope
 - naive spectral
'''
import torch
import numpy as np
import sklearn
import sklearn.ensemble
import sklearn.covariance
import sklearn.cluster
import random
import utils
import pdb
def knn_dist(X, k=10, sum_dist=False):
    '''
    kNN method that uses the distances to the k nearest neighbors
    as outlier scores (global method).
    Input:
        -X: data, 2D tensor.
        -k: number of neighbors.
        -sum_dist: sum instead of average the neighbor distances.
    '''
    neighbor_dist, _ = utils.dist_rank(X, k=k, largest=False)
    if sum_dist:
        return neighbor_dist.sum(-1)
    return neighbor_dist.mean(-1)
'''
Lof method using reachability criteria to determine density.
(Local method.)
'''
def knn_dist_lof(X, k=10):
    # Score = average reachability distance to the k nearest neighbors
    # (higher = sparser neighborhood = more outlying).
    X_len = len(X)
    #dist_ = dist(X, X)
    #min_dist, min_idx = torch.topk(dist_, dim=-1, k=k, largest=False)
    min_dist, min_idx = utils.dist_rank(X, k=k, largest=False)
    # Distance from each point to its k-th nearest neighbor.
    kth_dist = min_dist[:, -1]
    # sum max(kth dist, dist(o, p)) over neighbors o of p
    kth_dist_exp = kth_dist.expand(X.size(0), -1) #n x n
    kth_dist = torch.gather(input=kth_dist_exp, dim=1, index=min_idx)
    # Reachability distance: replace dist(p, o) with kth_dist(o) when larger.
    min_dist[kth_dist > min_dist] = kth_dist[kth_dist > min_dist]
    #inverse of lrd scores; clamp guards against division by ~0 below
    dist_avg = min_dist.mean(-1).clamp(min=0.0001)
    compare_density = False  # hard-coded off: the branch below never runs
    if compare_density:
        #compare with density. Get kth neighbor index.
        dist_avg_exp = dist_avg.unsqueeze(-1) / dist_avg.unsqueeze(0).expand(X_len, -1)
        #lof = torch.zeros(X_len, 1).to(utils.device)
        lof = torch.gather(input=dist_avg_exp, dim=-1, index=min_idx).sum(-1)
        # NOTE(review): scatter_add_ is a Tensor method, not a top-level
        # torch function — confirm this dead branch ever executed.
        torch.scatter_add_(lof, dim=-1, index=min_idx, src=dist_avg_exp)
        return -lof.squeeze(0)
    return dist_avg
def knn_dist_loop(X, k=10):
    """
    LoOP: kNN based method using the quadratic mean of distances to the
    k nearest neighbors to estimate density.
    LoOP (Local Outlier Probabilities) (Kriegel et al. 2009a)

    Input:
    -X: data, 2D tensor.
    -k: neighborhood size.
    Returns: 1D tensor of per-point scores; higher = more outlying.
    """
    # Use torch.cdist for true euclidean distances. The previous dist()
    # helper returns *squared* distances, so squaring them again under the
    # root computed a quartic -- not quadratic -- mean.
    dist_ = torch.cdist(X, X)
    min_dist, _ = torch.topk(dist_, dim=-1, k=k, largest=False)
    # Quadratic mean: sqrt(mean(d_i^2)) over the k neighbor distances.
    dist_avg = (min_dist**2).mean(-1).sqrt()
    return dist_avg
def isolation_forest(X):
    """
    Isolation forest outlier scores.

    Input:
    -X: data, 2D tensor.
    Returns: 1D tensor on utils.device; the higher the score, the more
    likely the point is an outlier.
    """
    X = X.cpu().numpy()
    # 'behaviour' was deprecated in scikit-learn 0.22 and removed in 0.24;
    # contamination='auto' alone reproduces the old behaviour='new' setting.
    model = sklearn.ensemble.IsolationForest(contamination='auto')
    model.fit(X)
    # decision_function: larger = more normal, so negate to get outlier scores.
    scores = -model.decision_function(X)
    return torch.from_numpy(scores).to(utils.device)
def ellenv(X):
    """
    Elliptic envelope (robust covariance) outlier scores.

    Input:
    -X: data, 2D tensor.
    Returns: 1D tensor on utils.device; higher score = more likely outlier.
    """
    data = X.cpu().numpy()
    estimator = sklearn.covariance.EllipticEnvelope(contamination=0.2)
    estimator.fit(data)
    # decision_function is larger for inliers; negate so outliers score high.
    raw = estimator.decision_function(data)
    return torch.from_numpy(-raw).to(utils.device)
def lof(X):
    """
    Local outlier factor (sklearn), returned as binary flags.

    Input:
    -X: data, 2D tensor.
    Returns: tensor the same length as X; 1 marks an outlier, 0 an inlier.
    """
    # Precompute pairwise distances to accelerate LOF.
    # NOTE(review): dist() returns *squared* euclidean distances; neighbor
    # ranking is unchanged but LOF ratios are computed on the squared
    # scale -- confirm this is intended.
    dist_mx = dist(X, X).cpu().numpy()
    # Floating-point cancellation in dist() can leave tiny negative entries;
    # a precomputed distance matrix must be non-negative.
    np.clip(dist_mx, 0, None, out=dist_mx)
    # Requires `import sklearn.neighbors` at module level (importing bare
    # `sklearn` does not pull in submodules).
    model = sklearn.neighbors.LocalOutlierFactor(
        n_neighbors=20, metric='precomputed', contamination='auto')
    labels = model.fit_predict(dist_mx)
    labels = torch.from_numpy(labels).to(utils.device)
    scores = torch.zeros_like(labels)
    scores[labels == -1] = 1
    return scores
def dbscan(X):
    """
    DBSCAN-based outlier flags: a point is an inlier when it sits in a
    dense region (many neighbors) or is reachable from one.

    Input:
    -X: data points (2D tensor), not pairwise distances.
    Returns:
    -scores tensor; 1 marks an outlier.
    """
    clusterer = sklearn.cluster.DBSCAN(min_samples=10)
    clusterer.fit(X.cpu().numpy())
    # DBSCAN assigns label -1 to noise ("outlier") points.
    cluster_ids = torch.from_numpy(clusterer.labels_).to(utils.device)
    flags = torch.zeros_like(cluster_ids)
    flags[cluster_ids == -1] = 1
    return flags
def l2(X):
    """
    Squared L2 distance of every point to the data mean.
    Higher scores mean more likely outliers.
    """
    centered = X - X.mean(dim=0)
    return centered.pow(2).sum(dim=-1)
def dist(X, Y):
    """
    Pairwise *squared* euclidean distances.

    Input:
    -X, Y: 2D tensors (n x d and m x d).
    Returns: n x m tensor where entry (i, j) is ||X[i] - Y[j]||^2.
    """
    X_norms = torch.sum(X**2, dim=1).view(-1, 1)
    Y_norms = torch.sum(Y**2, dim=1).view(1, -1)
    # ||x||^2 + ||y||^2 - 2<x, y>; clamp because floating-point
    # cancellation can otherwise produce slightly negative values.
    return (X_norms + Y_norms - 2*torch.mm(X, Y.t())).clamp_(min=0)
|
import numpy as np
# Python 2 script: find the maximum "hourglass" sum in a 6x6 grid read
# from stdin (top row of 3, middle cell, bottom row of 3).
arr = []
for arr_i in xrange(6):
    arr_temp = map(int, raw_input().strip().split(' '))
    arr.append(arr_temp)
# Flatten row-major so an hourglass can be addressed with flat indices.
flat = [cell for row in arr for cell in row]
# Hourglass anchored at (0, 0): row 0 cols 0-2 -> 0,1,2; row 1 col 1 -> 7;
# row 2 cols 0-2 -> 12,13,14. (Original mask [0,1,2,7,11,12,13] was wrong.)
mask = [0, 1, 2, 7, 12, 13, 14]
val = []
for i in range(4):
    for j in range(4):
        # list.append returns None -- never rebind val to its result.
        val.append(sum(flat[m] for m in mask))
        mask = [m + 1 for m in mask]          # slide one column right
    mask = [m + 2 for m in mask]              # wrap to next row start (+6 total)
print(max(val))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.