#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/6/10 3:06 PM
# @Author : Lucas Ma
# @File : getObjInfo
# Get information about an object
import types
def fn():
pass
print(type(fn))
|
#!/usr/bin/env python3
#-*- coding:utf-8 -*-
"""
Created on 2020/06/30
author: relu
"""
import os
import cv2
import time
import numpy as np
import albumentations as alt
from albumentations.pytorch import ToTensorV2 as ToTensor
# from matplotlib import pyplot as plt
from IPython import embed
def augment_and_show(aug, image, savename):
image = aug(image=image)['image']
image = image.permute(1, 2, 0).numpy()
cv2.imwrite(savename, image)
def faceaug():
''' choose the augmentation for face-recognition '''
aug = alt.Compose([
alt.HorizontalFlip(p=0.5),
alt.HueSaturationValue(hue_shift_limit=10, sat_shift_limit=20, val_shift_limit=10, p=0.2),
alt.RGBShift(r_shift_limit=20, g_shift_limit=20, b_shift_limit=20, p=0.2),
alt.RandomBrightnessContrast(brightness_limit=0.2, contrast_limit=0.2, p=0.3),
alt.ToGray(p=0.01),
alt.MotionBlur(blur_limit=7, p=0.2), # default=11
alt.GaussianBlur(blur_limit=7, p=0.2), # default=11
alt.GaussNoise(var_limit=(5.0, 20.0), mean=0, p=0.1), # default var_limit=(10.0, 30.0)
alt.ISONoise(p=0.2),
# alt.Normalize(),
ToTensor()])
return aug
if __name__ == "__main__":
root_dir = '/Users/relu/data/passdoor'
src_dir = os.path.join(root_dir, 'align_check')
tar_dir = os.path.join(root_dir, 'align_aug_2.0')
if not os.path.exists(tar_dir):
os.mkdir(tar_dir)
aug = faceaug()
start_time = time.time()
idx = 0
for file in os.listdir(src_dir):
src_file = os.path.join(src_dir, file)
tar_file = os.path.join(tar_dir, file)
img = cv2.imread(src_file)
augment_and_show(aug, img, tar_file)
if (idx + 1) % 5000 == 0:
print('already processed %3d files, total %3d files' % (idx+1, len(os.listdir(src_dir))))
idx += 1
finish_time = time.time()
total_time = (finish_time - start_time) / 60
print('augmentation costs %.4f mins' % total_time)
|
import hashlib
from django.template import TemplateDoesNotExist
from django.template.loaders.cached import Loader as DjangoCachedLoader
from django.utils.encoding import force_bytes
from amp_tools import get_amp_detect
from amp_tools.settings import settings
from amp_tools.compat import BaseLoader, template_loader, template_from_string
class Loader(BaseLoader):
is_usable = True
_template_source_loaders = None
def get_contents(self, origin):
return origin.loader.get_contents(origin)
def get_template_sources(self, template_name, template_dirs=None):
template_name = self.prepare_template_name(template_name)
for loader in self.template_source_loaders:
if hasattr(loader, 'get_template_sources'):
try:
for result in loader.get_template_sources(template_name, template_dirs):
yield result
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of this particular
# template_dir (it might be inside another one, so this isn't
# fatal).
pass
def prepare_template_name(self, template_name):
template_name = u'%s/%s' % (get_amp_detect(), template_name)
if settings.AMP_TOOLS_TEMPLATE_PREFIX:
template_name = settings.AMP_TOOLS_TEMPLATE_PREFIX + template_name
return template_name
def load_template(self, template_name, template_dirs=None):
template_name = self.prepare_template_name(template_name)
for loader in self.template_source_loaders:
try:
return loader(template_name, template_dirs)
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist("Tried %s" % template_name)
def load_template_source(self, template_name, template_dirs=None):
template_name = self.prepare_template_name(template_name)
for loader in self.template_source_loaders:
if hasattr(loader, 'load_template_source'):
try:
return loader.load_template_source(
template_name,
template_dirs)
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist("Tried %s" % template_name)
@property
def template_source_loaders(self):
if not self._template_source_loaders:
loaders = []
for loader_name in settings.AMP_TOOLS_TEMPLATE_LOADERS:
loader = template_loader(loader_name)
if loader is not None:
loaders.append(loader)
self._template_source_loaders = tuple(loaders)
return self._template_source_loaders
class CachedLoader(DjangoCachedLoader):
is_usable = True
def cache_key(self, template_name, template_dirs, *args):
if len(args) > 0: # Django >= 1.9
key = super(CachedLoader, self).cache_key(template_name, template_dirs, *args)
else:
if template_dirs:
key = '-'.join([
template_name,
hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()
])
else:
key = template_name
return '{0}:{1}'.format(get_amp_detect(), key)
def load_template(self, template_name, template_dirs=None):
key = self.cache_key(template_name, template_dirs)
template_tuple = self.template_cache.get(key)
if template_tuple is TemplateDoesNotExist:
raise TemplateDoesNotExist('Template not found: %s' % template_name)
elif template_tuple is None:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = template_from_string(template)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
self.template_cache[key] = (template, origin)
self.template_cache[key] = (template, None)
return self.template_cache[key]
|
pov = dict(nombre="kevin",edad=17,sexo="masculino",intereses="videojuegos")
pov["nombre"] = "JOSUE"  # update the value stored under the "nombre" key
print(pov)
|
#--------------------------------------------------------
#
#trajvars.py
#--------------------------------------------------------
# input classes
class trajectory():
def __init__(self):
self.name=''
self.ID=0
self.numpoints=0
self.numtraj=0
self.grid=[]
self.year=[]
self.month=[]
self.date=[]
self.hour=[]
self.min=[]
self.forcasthour=[]
self.age=[]
self.lat=[]
self.lon=[]
self.hag=[]
self.pressure=[]
self.theta=[]
self.airtemp=[]
self.rain=[]
self.mixdepth=[]
self.rh=[]
self.msl=[]
self.flux=[]
# output classes
class srifcount():
def __init__(self):
self.ID=0
self.count1km=0.
self.count2km=0.
self.count3km=0.
self.countTotal=0.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-10-16 22:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dashboard_app', '0003_auto_20181016_2230'),
]
operations = [
migrations.RemoveField(
model_name='message',
name='recipient',
),
migrations.AddField(
model_name='message',
name='poster',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='messages', to='dashboard_app.User'),
preserve_default=False,
),
]
|
import unicodedata
from datetime import datetime
from django import forms
from django.contrib.auth import (
authenticate, get_user_model, password_validation,
)
from django.contrib.auth.hashers import (
UNUSABLE_PASSWORD_PREFIX, identify_hasher,
)
from django.contrib.auth.models import User
from django.contrib.auth.tokens import default_token_generator
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
from django.urls import reverse_lazy
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.text import capfirst
from django.utils.translation import gettext, gettext_lazy as _
from email.mime.image import MIMEImage
from django.contrib.staticfiles import finders
from .tokens import account_activation_token
from .decorators import parsleyfy
from .models import User, Profile, Feedback, ContactUs, ContactUsSettings
UserModel = get_user_model()
class ReadOnlyPasswordHashWidget(forms.Widget):
template_name = 'credential/password_reset/auth/widgets/read_only_password_hash.html'
read_only = True
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
summary = []
if not value or value.startswith(UNUSABLE_PASSWORD_PREFIX):
summary.append({'label': gettext("No password set.")})
else:
try:
hasher = identify_hasher(value)
except ValueError:
summary.append({'label': gettext("Invalid password format or unknown hashing algorithm.")})
else:
for key, value_ in hasher.safe_summary(value).items():
summary.append({'label': gettext(key), 'value': value_})
context['summary'] = summary
return context
class ReadOnlyPasswordHashField(forms.Field):
widget = ReadOnlyPasswordHashWidget
def __init__(self, *args, **kwargs):
kwargs.setdefault("required", False)
super().__init__(*args, **kwargs)
def bound_data(self, data, initial):
# Always return initial because the widget doesn't
# render an input field.
return initial
def has_changed(self, initial, data):
return False
class UsernameField(forms.CharField):
def to_python(self, value):
return unicodedata.normalize('NFKC', super().to_python(value))
def widget_attrs(self, widget):
return {
**super().widget_attrs(widget),
'autocapitalize': 'none',
'autocomplete': 'username',
}
@parsleyfy
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
first_name = forms.CharField(
max_length=255, required=True, error_messages={'required': 'First Name field is required.'}
)
last_name = forms.CharField(
max_length=255, required=True, error_messages={'required': 'Last Name field is required.'}
)
email = forms.EmailField(
max_length=255, required=True, error_messages={
'required': 'Email field is required.', "invalid": "Invalid email",
'unique': "A user with that email already exists.",
# 'remote-message': "User with this email is already exists.",
}
)
username = UsernameField(
max_length=255, required=True, error_messages={
'required': 'Username field is required.',
'unique': _("A user with that username already exists."),
},
)
password1 = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput,
help_text=password_validation.password_validators_help_text_html(),
error_messages={'required': 'Password field is required.'},
)
password2 = forms.CharField(
label=_("Password confirmation"),
widget=forms.PasswordInput,
strip=False,
# help_text=_("Enter the same password as before, for verification."),
error_messages={
'required': 'Password Confirmation field is required.',
'equalto': 'Your passwords do not match',
},
)
class Meta:
model = User
fields = ("username", "first_name", "last_name", "email",)
field_classes = {'username': UsernameField}
parsley_extras = {
# 'username': {
# 'remote': 'unique',
# 'error-message': "A user with that username already exists."
# },
# 'email': {
# 'remote': 'unique',
# 'error-message': "A user with that email already exists."
# },
'password2': {
'equalto': "password1",
'error-message': "The two password fields didn't match.",
},
}
def __init__(self, *args, **kwargs):
super(UserCreationForm, self).__init__(*args, **kwargs)
self.fields['first_name'].widget.attrs['placeholder'] = "First Name"
self.fields['last_name'].widget.attrs['placeholder'] = "Last Name"
self.fields['email'].widget.attrs['placeholder'] = "Email"
self.fields['username'].widget.attrs['placeholder'] = "Username"
self.fields['password1'].widget.attrs['placeholder'] = "Password"
self.fields['password2'].widget.attrs['placeholder'] = "Password Confirmation"
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control'
})
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def _post_clean(self):
super()._post_clean()
# Validate the password after self.instance is updated with form data
# by super().
password = self.cleaned_data.get('password2')
if password:
try:
password_validation.validate_password(password, self.instance)
except forms.ValidationError as error:
self.add_error('password2', error)
def save(self, commit=True):
user = super().save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
@parsleyfy
class ChristianBaseAuthenticationForm(forms.Form):
"""
Base class for authenticating users. Extend this to get a form that accepts
username/password logins.
"""
username = UsernameField(
max_length=254,
widget=forms.TextInput(attrs={'autofocus': True}),
error_messages={
'required': 'Username field is required.',
},
)
password = forms.CharField(
label=_("Password"),
strip=False,
widget=forms.PasswordInput,
error_messages={
'required': 'Password field is required.',
},
)
error_messages = {
'invalid_login': _(
"Please enter a correct %(username)s and password."
),
'inactive': _("This account is inactive."),
}
def __init__(self, request=None, *args, **kwargs):
"""
The 'request' parameter is set for custom auth use by subclasses.
The form data comes in via the standard 'data' kwarg.
"""
self.request = request
self.user_cache = None
super().__init__(*args, **kwargs)
self.fields['username'].widget.attrs['placeholder'] = "Your Username"
self.fields['password'].widget.attrs['placeholder'] = "Your password"
# Set the label for the "username" field.
self.username_field = UserModel._meta.get_field(UserModel.USERNAME_FIELD)
if self.fields['username'].label is None:
self.fields['username'].label = capfirst(self.username_field.verbose_name)
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
if username is not None and password:
self.user_cache = authenticate(self.request, username=username, password=password)
if self.user_cache is None:
raise forms.ValidationError(
self.error_messages['invalid_login'],
code='invalid_login',
params={'username': self.username_field.verbose_name},
)
else:
self.confirm_login_allowed(self.user_cache)
return self.cleaned_data
def confirm_login_allowed(self, user):
"""
Controls whether the given User may log in. This is a policy setting,
independent of end-user authentication. This default behavior is to
allow login by active users, and reject login by inactive users.
If the given user cannot log in, this method should raise a
``forms.ValidationError``.
If the given user may log in, this method should return None.
"""
if not user.is_active:
raise forms.ValidationError(
self.error_messages['inactive'],
code='inactive',
)
def get_user_id(self):
if self.user_cache:
return self.user_cache.id
return None
def get_user(self):
return self.user_cache
@parsleyfy
class ChristianBaseForgotPasswordAndUserResendActivationForm(forms.Form):
email = forms.EmailField(
max_length=255, required=True, error_messages={
'required': 'Email field is required.',
"invalid": "Invalid email",
}
)
def __init__(self, *args, **kwargs):
super(ChristianBaseForgotPasswordAndUserResendActivationForm, self).__init__(*args, **kwargs)
self.fields['email'].widget.attrs['placeholder'] = "Your email"
def send_mail(self, subject_template_name, email_template_name,
context, from_email, to_email, html_email_template_name=None):
"""
Send a django.core.mail.EmailMultiAlternatives to `to_email`.
"""
subject = render_to_string(subject_template_name, context)
# Email subject *must not* contain newlines
subject = ''.join(subject.splitlines())
body = render_to_string(email_template_name, context)
email_message = EmailMultiAlternatives(subject, body, from_email, [to_email])
email_message.attach_alternative(body, 'text/html')
if html_email_template_name is not None:
html_email = render_to_string(html_email_template_name, context)
email_message.attach_alternative(html_email, 'text/html')
email_message.send()
def get_users(self, email):
"""Given an email, return matching user(s) who should receive a reset.
This allows subclasses to more easily customize the default policies
that prevent inactive users and users with unusable passwords from
resetting their password.
"""
active_users = UserModel._default_manager.filter(**{
'%s__iexact' % UserModel.get_email_field_name(): email,
'is_active': True,
})
return (u for u in active_users if u.has_usable_password())
def save(self, domain_override=None,
subject_template_name='email/password_reset_subject.txt',
email_template_name='email/password_reset.html',
use_https=False, token_generator=default_token_generator,
from_email=None, request=None, html_email_template_name=None,
extra_email_context=None):
"""
Generate a one-use only link for resetting password and send it to the
user.
"""
myDate = datetime.now() # get today datetime + year...
email = self.cleaned_data["email"]
for user in self.get_users(email):
if not domain_override:
current_site = get_current_site(request)
site_name = current_site.name
domain = current_site.domain
else:
site_name = domain = domain_override
context = {
'email': email,
'domain': domain,
'site_name': site_name,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'user': user,
'token': token_generator.make_token(user),
'protocol': 'https' if use_https else 'http',
'myDate': myDate,
}
if extra_email_context is not None:
context.update(extra_email_context)
self.send_mail(
subject_template_name, email_template_name, context, from_email,
email, html_email_template_name=html_email_template_name,
)
# def logo_data():
# """ This function reads/finds the logo image file. """
# with open(finders.find('images/logo_text1.png'), 'rb') as f:
# logo_data = f.read()
# logo = MIMEImage(logo_data)
# logo.add_header('Content-ID', '<logo>')
# return logo
@parsleyfy
class ChristianBaseSetPasswordForm(forms.Form):
"""
A form that lets a user set their password without entering the old
password.
"""
error_messages = {
'password_mismatch': _("The two password fields didn't match."),
}
new_password1 = forms.CharField(
label=_("New Password"),
strip=False,
widget=forms.PasswordInput,
help_text=password_validation.password_validators_help_text_html(),
error_messages={'required': 'New Password field is required.'},
)
new_password2 = forms.CharField(
label=_("New Password confirmation"),
widget=forms.PasswordInput,
strip=False,
# help_text=_("Enter the same password as before, for verification."),
error_messages={'required': 'New Password Confirmation field is required.'},
)
class Meta:
parsley_extras = {
'new_password2': {
'equalto': "new_password1",
'error-message': "The two password fields didn't match.",
},
}
def __init__(self, user, *args, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
self.fields['new_password1'].widget.attrs['placeholder'] = "Your New Password"
self.fields['new_password2'].widget.attrs['placeholder'] = "Your New Password Confirmation"
def clean_new_password2(self):
password1 = self.cleaned_data.get('new_password1')
password2 = self.cleaned_data.get('new_password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
password_validation.validate_password(password2, self.user)
return password2
def save(self, commit=True):
password = self.cleaned_data["new_password1"]
self.user.set_password(password)
if commit:
self.user.save()
return self.user
@parsleyfy
class ChristianBaseChangePasswordForm(ChristianBaseSetPasswordForm):
"""
A form that lets a user change their password by entering their old
password.
"""
error_messages = {
**ChristianBaseSetPasswordForm.error_messages,
'password_incorrect': _("Your old password was entered incorrectly. Please enter it again."),
}
old_password = forms.CharField(
label=_("Old password"),
strip=False,
widget=forms.PasswordInput,
error_messages={'required': 'Old Password field is required.'},
)
new_password1 = forms.CharField(
label=_("New Password"),
strip=False,
widget=forms.PasswordInput,
help_text=password_validation.password_validators_help_text_html(),
error_messages={'required': 'New Password field is required.'},
)
new_password2 = forms.CharField(
label=_("New Password confirmation"),
widget=forms.PasswordInput,
strip=False,
# help_text=_("Enter the same password as before, for verification."),
error_messages={'required': 'New Password Confirmation field is required.'},
)
field_order = ['old_password', 'new_password1', 'new_password2']
class Meta:
parsley_extras = {
'new_password2': {
'equalto': "new_password1",
'error-message': "The two password fields didn't match.",
},
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['old_password'].widget.attrs['placeholder'] = "Your Old Password"
self.fields['new_password1'].widget.attrs['placeholder'] = "Your New Password"
self.fields['new_password2'].widget.attrs['placeholder'] = "Your New Password Confirmation"
def clean_old_password(self):
"""
Validate that the old_password field is correct.
"""
old_password = self.cleaned_data["old_password"]
if not self.user.check_password(old_password):
raise forms.ValidationError(
self.error_messages['password_incorrect'],
code='password_incorrect',
)
return old_password
@parsleyfy
class AdminPasswordChangeForm(forms.Form):
"""
A form used to change the password of a user in the admin interface.
"""
error_messages = {
'password_mismatch': _('The two password fields didn’t match.'),
}
required_css_class = 'required'
password1 = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput(attrs={'autocomplete': 'new-password', 'autofocus': True}),
strip=False,
help_text=password_validation.password_validators_help_text_html(),
)
password2 = forms.CharField(
label=_("Password (again)"),
widget=forms.PasswordInput(attrs={'autocomplete': 'new-password'}),
strip=False,
help_text=_("Enter the same password as before, for verification."),
)
def __init__(self, user, *args, **kwargs):
self.user = user
super().__init__(*args, **kwargs)
def clean_password2(self):
password1 = self.cleaned_data.get('password1')
password2 = self.cleaned_data.get('password2')
if password1 and password2:
if password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
password_validation.validate_password(password2, self.user)
return password2
def save(self, commit=True):
"""Save the new password."""
password = self.cleaned_data["password1"]
self.user.set_password(password)
if commit:
self.user.save()
return self.user
@property
def changed_data(self):
data = super().changed_data
for name in self.fields:
if name not in data:
return []
return ['password']
@parsleyfy
class ChristianBaseUserChangeForm(forms.ModelForm):
"""
This form updates the requesting user's details ( - User - )
"""
password = ReadOnlyPasswordHashField(
label=_("Password"),
help_text=_(
'Raw passwords are not stored, so there is no way to see this '
'user’s password, but you can change the password using '
'<a href="{}">this form</a>.'
),
)
first_name = forms.CharField(
max_length=255, required=True, error_messages={'required': 'First Name field is required.'}
)
last_name = forms.CharField(
max_length=255, required=True, error_messages={'required': 'Last Name field is required.'}
)
email = forms.EmailField(
max_length=255, required=True, error_messages={
'required': 'Email field is required.', "invalid": "Invalid email",
'unique': _("A user with that email already exists."),
}
)
username = forms.CharField(
max_length=255, required=True, error_messages={'required': 'Username field is required.'}
)
class Meta:
model = User
fields = ['username','first_name','last_name','email',]
field_classes = {'username': UsernameField}
def __init__(self, *args, **kwargs):
super(ChristianBaseUserChangeForm, self).__init__(*args, **kwargs)
password = self.fields.get('password')
if password:
password.help_text = password.help_text.format('../password/')
user_permissions = self.fields.get('user_permissions')
if user_permissions:
user_permissions.queryset = user_permissions.queryset.select_related('content_type')
self.fields['first_name'].widget.attrs['placeholder'] = "Your First Name"
self.fields['last_name'].widget.attrs['placeholder'] = "Your Last Name"
self.fields['email'].widget.attrs['placeholder'] = "Your Email"
self.fields['username'].widget.attrs['placeholder'] = "Your Username"
for field in iter(self.fields):
self.fields[field].widget.attrs.update({
'class': 'form-control'
})
def clean_password(self):
# Regardless of what the user provides, return the initial value.
# This is done here, rather than on the field, because the
# field does not have access to the initial value
return self.initial.get('password')
@parsleyfy
class ChristianBaseUserProfileForm(forms.ModelForm):
"""
This form updates the requesting user's profile ( - User Profile - )
"""
bio = forms.CharField(
max_length=7000, required=True,
widget=forms.Textarea(
attrs={'placeholder': 'Short info about you!'}
),
error_messages={'required': 'Bio field is required.'}
)
class Meta:
model = Profile
fields = ['bio', ]
class ChristianBaseUserProfilePhotoForm(forms.ModelForm):
"""
This form updates the requesting user's profile photo ( - User Profile - )
"""
class Meta:
model = Profile
fields = ['image']
@parsleyfy
class ChristianBaseUserSettingForm(forms.ModelForm):
"""
This form allows users to add their website and social account links ( - User Setting - )
"""
website = forms.URLField(
label="Your Website URL",
max_length=255, required=False,
)
facebook_url = forms.URLField(
label="Your Facebook URL",
max_length=255, required=False,
)
youtube_url = forms.URLField(
label="Your Youtube URL",
max_length=255, required=False,
)
instagram_url = forms.URLField(
label="Your Instagram URL",
max_length=255, required=False,
)
linkedin_url = forms.URLField(
label="Your Linkedin URL",
max_length=255, required=False,
)
twitter_url = forms.URLField(
label="Your Twitter URL",
max_length=255, required=False,
)
class Meta:
model = Profile
fields = ['website', 'facebook_url', 'youtube_url', 'instagram_url', 'linkedin_url', 'twitter_url']
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['website'].widget.attrs['placeholder'] = "https://www.yoursitename.com/"
self.fields['facebook_url'].widget.attrs['placeholder'] = "https://www.facebook.com/yourfacebookid/"
self.fields['youtube_url'].widget.attrs['placeholder'] = "https://www.youtube.com/youryoutubeid/"
self.fields['instagram_url'].widget.attrs['placeholder'] = "https://www.instagram.com/yourinstagramid/"
self.fields['linkedin_url'].widget.attrs['placeholder'] = "https://www.linkedin.com/yourlinkedinid/"
self.fields['twitter_url'].widget.attrs['placeholder'] = "https://twitter.com/yourtwitterid/"
@parsleyfy
class ChristianBaseUserFeedbackForm(forms.ModelForm):
"""
This form allows users to give feedback ( - Feedback - )
"""
message = forms.CharField(
max_length=7000, required=True,
widget=forms.Textarea(
attrs={'placeholder': 'Please enter the details of your feedback. A member of our support staff will respond as soon as possible'}
),
error_messages={'required': 'Message field is required.'}
)
class Meta:
model = Feedback
fields = ['message', ]
@parsleyfy
class ChristianBaseRecoverAccountForm(forms.Form):
"""
This form allows users to recover or unlock their account ( - Recover Account - )
"""
username = UsernameField(
max_length=255, required=True, error_messages={
'required': 'Username field is required.',
},
)
email = forms.EmailField(
max_length=255, required=True, error_messages={
'required': 'Email field is required.', "invalid": "Invalid email",
}
)
class Meta:
model = User
fields = ['username', 'email', ]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['username'].widget.attrs['placeholder'] = "Your Username"
self.fields['email'].widget.attrs['placeholder'] = "Your Email Address"
@parsleyfy
class ContactUsForm(forms.ModelForm):
"""
This contact form allows users to report urgent issues ( - Contact Us Form - )
"""
name = forms.CharField(
max_length=255, required=True, error_messages={
'required': 'Name field is required.',
},
)
subject = forms.CharField(
max_length=255, required=True, error_messages={
'required': 'Subject field is required.',
},
)
email = forms.EmailField(
max_length=255, required=True, error_messages={
'required': 'Email field is required.', "invalid": "Invalid email",
}
)
message = forms.CharField(
max_length=9000, required=True,
widget=forms.Textarea(
attrs={'placeholder': 'Your message here?'}
),
error_messages={'required': 'Message field is required.'}
)
class Meta:
model = ContactUs
fields = ['name', 'subject', 'email', 'message', ]
def __init__(self, *args, **kwargs):
super(ContactUsForm, self).__init__(*args, **kwargs)
self.fields['name'].widget.attrs['placeholder'] = "Your Full Name"
self.fields['subject'].widget.attrs['placeholder'] = "Subject / Title"
self.fields['email'].widget.attrs['placeholder'] = "Your Email Address"
class ContactUsSettingsForm(forms.ModelForm):
class Meta:
model = ContactUsSettings
exclude = ()
def __init__(self, *args, **kwargs):
super(ContactUsSettingsForm, self).__init__(*args, **kwargs)
for field in iter(self.fields):
# The original guard compared a numeric index with a field name and was
# always true, so styling and a placeholder are applied to every field.
self.fields[field].widget.attrs.update({
'class': 'form-control',
"placeholder": "Please enter your " + field.replace('_', ' ').capitalize()
})
|
import random
import requests
import sys
def password():
# Remote accessing a txtfile with list of words using requests library
dictionary = "https://svnweb.freebsd.org/csrg/share/dict/words?view=co&content-type=text/plain"
response = requests.get(dictionary)
Words = response.text.splitlines()  # decoded text, one word per line
numbers = ['0','1','2','3','4','5','6','7','8','9']
special = ['!','@','#','$','%','&','*']
print('\n' + "------- PASSWORD GENERATOR -------")
print('\n' + "Welcome... to the password generator,"
+ '\n' + 'Where all your passwords are generated... with care.'
+ '\n' + 'Weak, Strong, Short, Long,'
+ '\n' + 'Our library has it all :)' + '\n')
print("Let's start off with determining a password strength. When prompted,"
+ '\n' + 'please answer with one of the following options:' + '\n')
print("(1) Weak")
print("(2) Medium")
print("(3) Strong")
retry = True
while(retry):
strength = str(input('\n' + "So, what will it be? : "))
password = ""
if not(strength[0] == 's' or strength[0] == 'S') or strength == "1" or strength == "2":
index = random.randint(0, len(Words)-1)
ref = Words[index]
password += ref
# For medium passwords, append numbers to the end
if strength[0] == 'm' or strength[0] == 'M':
numNum = random.randint(1,2)
for i in range(numNum):
index = random.randint(0, len(numbers)-1)
password += numbers[index]
print("Password: " + password)
elif strength[0] == 's' or strength[0] == 'S' or strength == "3":
numWords = random.randint(1,2)
capLetters = random.randint(3,4)
numNum = 2
lenWordSect = 0
for w in range(numWords):
index = random.randint(0, len(Words)-1)
ref = Words[index]
password += ref
lenWordSect = len(password)
for n in range(numNum):
index = random.randint(0, len(numbers)-1)
password += numbers[index]
# For strong passwords, append a special char to the end
password += special[random.randint(0,len(special)-1)]
for c in range(capLetters):
index = random.randint(0,lenWordSect - 1)
chars = list(password)
chars[index] = chars[index].upper()
password = "".join(chars)
print("Password: " + password)
ans = str(input('\n' + "Would you like to make another password? (Y/N): "))
if ans[0] == 'Y' or ans[0] == 'y':
retry = True
else:
retry = False
print("Hope I served you well :) ")
sys.exit()
if __name__ =="__main__":
password()
|
# TianTcl - Whisper game - generator
import random
_subject = ["กัปตัน","เทพค้อน","ยักษ์เขียว","เกราะเหล็ก","แมงมุม","มนุษย์มด","ตาเหยี่ยว","เสือดำ"]
ext_sub = ["ตัวจิ๋ว","นักกล้าม","คนเหล็ก","หล่อเหลา","ผู้หิวโหย"]
_verb = ["กำลังบิน","กลิ้ง","คลาน","นอน","เต้น"]
ext_verb = [None,"อย่างรวดเร็ว","ช้าๆ","เสียงดังมาก","อย่างรุนแรง","อย่างบ้าคลั่ง","อย่างขาดสติ"]
_object = ["ไปสู้กับธานอส","ไปหาอินฟินิตี้สโตน","กินข้าว","เข้าห้องน้ำ"]
ext_obj = ["ที่ดาวอังคาร","ที่บ้านเพื่อน","รังมด","ป่าใหญ่","ทุ่งหญ้า","ดาวไททัน"]
def make(_part):
word = None  # default when the part name is not recognized
if _part == "subject":
word = random.choice(_subject)
elif _part == "verb":
word = random.choice(_verb)
elif _part == "object":
word = random.choice(_object)
elif _part == "ext subject":
word = random.choice(ext_sub)
elif _part == "ext verb":
word = random.choice(ext_verb)
elif _part == "ext object":
word = random.choice(ext_obj)
return word
def gen():
parts =["subject","ext subject","verb","ext verb","object","ext object"]
sentence = ""
for part in parts:
word = make(part)
if word is not None:
sentence += word
return sentence
|
import PyCapture2
import logging
import numpy as np
def setup_camera():
bus = PyCapture2.BusManager()
num_cams = bus.getNumOfCameras()
logging.info('Number of cameras detected: %d' % num_cams)
if not num_cams:
logging.error('Insufficient number of cameras. Exiting...')
raise ValueError('Insufficient number of cameras. Exiting...')
cam = PyCapture2.Camera()
cam.connect(bus.getCameraFromIndex(0))
cam_info = cam.getCameraInfo()
logging.info('*** CAMERA INFORMATION ***')
logging.info('Serial number - %d', cam_info.serialNumber)
logging.info('Camera model - %s', cam_info.modelName)
logging.info('Camera vendor - %s', cam_info.vendorName)
logging.info('Sensor - %s', cam_info.sensorInfo)
logging.info('Resolution - %s', cam_info.sensorResolution)
logging.info('Firmware version - %s', cam_info.firmwareVersion)
logging.info('Firmware build time - %s', cam_info.firmwareBuildTime)
cam.startCapture()
return cam
def shot(cam):
image = cam.retrieveBuffer()
image = image.convert(PyCapture2.PIXEL_FORMAT.BGR)
image2 = np.array(image.getData(), dtype=np.uint8).reshape(image.getRows(), image.getCols(), 3)
return image2
def close_camera(cam):
cam.stopCapture()
cam.disconnect()
logging.info('Camera disconnected.')
if __name__ == '__main__':
import cv2
from utils import setup_logger
setup_logger('DEBUG')
cam = setup_camera()
try:
while True:
img = shot(cam)
cv2.imshow('test', img)
cv2.waitKey(1)
finally:
cv2.destroyAllWindows()
close_camera(cam)
|
def check_vowel(string, position):
if position < 0: return False
try:
return string[position].lower() in 'aeiou'
except IndexError:
return False
'''
Check if it is a vowel(a, e, i, o, u,) on the n position in a string
(the first argument). Don't forget about uppercase.
A few cases:
{
checkVowel('cat', 1) -> true // 'a' is a vowel
checkVowel('cat', 0) -> false // 'c' is not a vowel
checkVowel('cat', 4) -> false // this position doesn't exist
}
P.S. If n < 0, return false
'''
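# A minimal usage sketch with illustrative inputs; it runs only when this file
# is executed directly and mirrors the cases described above.
if __name__ == '__main__': print(check_vowel('cat', 1), check_vowel('cat', 0), check_vowel('cat', 4), check_vowel('cat', -1))  # True False False False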
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http.response import HttpResponseRedirect, HttpResponse
from django.urls.base import reverse
from django.utils import timezone
from django.utils.decorators import method_decorator
from django.views.generic.edit import CreateView
from django.views.generic.list import ListView
from Lyan_Tutorial import settings
from messaging.forms import CreateGroupForm, MessageForm
from .models import Group, Message
@method_decorator(login_required, name='dispatch')
class ProfileView(ListView):
model = Group
template_name = 'messaging/profile.html'
context_object_name = 'all_groups'
@method_decorator(login_required, name='dispatch')
class GroupView(CreateView):
model = Message
template_name = 'messaging/group.html'
context_object_name = 'message'
form_class = MessageForm
def get_template_names(self):
gp = Group.objects.get(pk=self.kwargs['group_id'])
members = gp.members.all()
if self.request.user in members:
return ['messaging/group.html']
else:
return ['messaging/group_not_joined.html']
def get_success_url(self):
return reverse('messaging:group', args=(self.kwargs['group_id'],))
def get_context_data(self, **kwargs):
context = super(GroupView, self).get_context_data(**kwargs)
gp = Group.objects.get(pk=self.kwargs['group_id'])
members = gp.members.all()
context['group'] = gp
if self.request.user in members:
context["message"] = self.model.objects.filter(group=gp)
return context
def form_valid(self, form):
gp = Group.objects.get(pk=self.request.POST.get("group_id"))
timezone.activate(settings.TIME_ZONE)
if gp.members.filter(id=self.request.user.id):
form.instance.author = self.request.user
temp = self.request.POST.get("group_id")
form.instance.group_id = temp
form.instance.date = timezone.localtime(timezone.now())
print(form.instance.date.strftime("%H:%M"))
self.object = form.save()
return HttpResponseRedirect(self.get_success_url())
raise PermissionDenied
@method_decorator(login_required, name='dispatch')
class CreateGroupView(CreateView):
model = Group
template_name = 'messaging/create_group.html'
form_class = CreateGroupForm
def get_success_url(self):
return reverse('messaging:profile')
def form_valid(self, form):
form.instance.creator = self.request.user
super(CreateGroupView, self).form_valid(form)
self.object.members.add(self.request.user)
self.object.save() #TODO: Refactor this
return HttpResponseRedirect(self.get_success_url())
@login_required
def join_group(request):
if request.method == 'POST': #TODO: Refactor
temp = request.POST.get("group_id")
gp = Group.objects.get(pk=temp)
members = gp.members.all()
if request.user not in members:
gp.members.add(request.user)
return HttpResponseRedirect(reverse('messaging:profile'))
|
import unittest
from katas.kyu_7.thinking_and_testing_true_or_false import testit as solution
class ThinkingAndTestingTestCase(unittest.TestCase):
def test_equals(self):
self.assertEqual(solution(0), 0)
def test_equals_2(self):
self.assertEqual(solution(2), 1)
def test_equals_3(self):
self.assertEqual(solution(3), 2)
def test_equals_4(self):
self.assertEqual(solution(4), 1)
def test_equals_5(self):
self.assertEqual(solution(5), 2)
def test_equals_6(self):
self.assertEqual(solution(6), 2)
def test_equals_7(self):
self.assertEqual(solution(7), 3)
def test_equals_8(self):
self.assertEqual(solution(8), 1)
def test_equals_9(self):
self.assertEqual(solution(9), 2)
def test_equals_10(self):
self.assertEqual(solution(10), 2)
def test_equals_11(self):
self.assertEqual(solution(100), 3)
def test_equals_12(self):
self.assertEqual(solution(1000), 6)
def test_equals_13(self):
self.assertEqual(solution(10000), 5)
|
import numpy as np
from sklearn import metrics
import argparse
import matplotlib.pyplot as plt
from os import path, makedirs
def compute_accuracies(ranks_file, total_ranks):
rank = np.loadtxt(ranks_file, dtype=float)
if np.ndim(rank) == 1:
rank_scores = rank.astype(float)
else:
rank_scores = rank[:, 2].astype(float)
acc = np.zeros(shape=total_ranks)
for i in range(total_ranks):
acc[i] = len(rank_scores[rank_scores <= (i + 1)]) / len(rank_scores)
return acc
def plot(title, total_ranks, acc1, l1, acc2, l2, acc3, l3):
plt.rcParams["figure.figsize"] = [7, 5]
plt.rcParams['font.size'] = 12
plt.grid(True, zorder=0, linestyle='dashed')
if title is not None:
plt.title(title, y=1.08)
if total_ranks is None:
total_ranks = np.max(acc1)
if acc2 is not None:
total_ranks = max(total_ranks, np.max(acc2))
if acc3 is not None:
total_ranks = max(total_ranks, np.max(acc3))
ranks = np.arange(1, total_ranks + 1)
plt.plot(ranks, acc1, 'C1', label=l1)
if l2 is not None:
plt.plot(ranks, acc2, 'C0', label=l2)
if l3 is not None:
plt.plot(ranks, acc3, 'C3', label=l3)
plt.legend(loc='lower right')
plt.xlim([1, total_ranks])
# plt.ylim([0, 1])
plt.ylabel('Accuracy')
plt.xlabel('Rank')
plt.tight_layout(pad=0.2)
return plt
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Plot CMC Curve')
parser.add_argument('-total_ranks', '-r', help='Number of ranks to compute.')
parser.add_argument('-rank1', '-r1', help='Rank scores 1.')
parser.add_argument('-label1', '-l1', help='Label 1.')
parser.add_argument('-rank2', '-r2', help='Rank scores 2.')
parser.add_argument('-label2', '-l2', help='Label 2.')
parser.add_argument('-rank3', '-r3', help='Rank scores 3.')
parser.add_argument('-label3', '-l3', help='Label 3.')
parser.add_argument('-title', '-t', help='Plot title.')
parser.add_argument('-dest', '-d', help='Folder to save the plot.')
parser.add_argument('-name', '-n', help='Plot name (without extension).')
args = parser.parse_args()
total_ranks = int(args.total_ranks)
acc2 = None
acc3 = None
acc1 = compute_accuracies(args.rank1, total_ranks)
if args.rank2 is not None:
acc2 = compute_accuracies(args.rank2, total_ranks)
if args.rank3 is not None:
acc3 = compute_accuracies(args.rank3, total_ranks)
plot(args.title, total_ranks, acc1, args.label1, acc2, args.label2, acc3, args.label3)
if not path.exists(args.dest):
makedirs(args.dest)
plot_path = path.join(args.dest, args.name + '.png')
plt.savefig(plot_path, dpi=600)
|
# Generated by Django 3.1.3 on 2021-01-04 14:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0010_auto_20210104_1508'),
]
operations = [
migrations.AddField(
model_name='booking',
name='service',
field=models.CharField(choices=[('Phone', 'Phone'), ('Email', 'Email'), ('Office', 'Office')], default='Phone', max_length=6),
),
]
|
#Extend#############################################################
def pip(command,value):
try:
import subprocess
subprocess.run(['pip', command, value])  # pass argv as a list so no shell is needed
except:
raise RuntimeError ("Failed To Load subprocess library")
#Converted##########################################################
def isset(varName):
keys = globals().keys()
for key in keys:
if str(varName) == key:return True
return False
def is_dir(dir):
try:
from os import scandir as sd
except:
print("")
try:
sd(str(dir)).__next__()
return True
except:
return False
def file_exists(path):
try:
import os.path
return os.path.isfile(path)
except:
return False
def is_readable(path):
try:
open(str(path))
return True
except:
return False
def date(format):
try:
from time import strftime,gmtime
except:
print()
try:
return strftime(str(format),gmtime())
except:
print()
def time():
try:
from time import time
except:
print()
return time()
def explode(delimiter,string,limit):
# Python counts how many splits, PHP counts the final array count, so we - 1
return str(string).split(delimiter, limit - 1)
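# For illustration (hypothetical values): explode(',', 'a,b,c,d', 3) mirrors
# PHP's explode and yields ['a', 'b', 'c,d'], since a Python maxsplit of 2
# produces three pieces.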
def implode(glue,pieces):
return glue.join(pieces)
def strtolower(string):
return str(string).lower()
def strtoupper(string):
return str(string).upper()
def strval(value):
return str(value)
def intval(string):
integer = ''
for ch in str(string):
if(ch in ['0','1','2','3','4','5','6','7','8','9']):
integer += ch
return int(integer) if integer else 0
def stripos(string, keyword):
# PHP's stripos is case-insensitive; this returns -1 when the keyword is absent
return str(string).lower().find(str(keyword).lower())
def die(string):
import sys
sys.exit(string)
exit = die
def count(var):
return len(var)
def file_get_contents(fileName):
try:
return open(fileName,'r').read()
except:
raise FileNotFoundError ("File Not Found")
def file_put_contents(fileName,Data):
try:
open(fileName,'w').write(Data)
except:
raise Exception ("Failed To Save File")
def empty(var):
if type(var) is int and var not in ['',None]:
return False
if type(var) is str and var not in ['',None]:
return False
if type(var) in [list,tuple,dict] and var not in [{},(),[],'',None]:
return False
if var not in [[],[[]],{},(),'',None]:
return False
return True
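# Illustrative behaviour, following PHP's empty(): empty('') -> True,
# empty([]) -> True, empty('x') -> False.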
def is_int(var):
return isinstance(var, int)
def is_float(var):
return isinstance(var, float)
is_double = is_float
def is_array(var):
return isinstance(var, (list, tuple, dict))
def is_string(var):
return isinstance(var, str)
def echo(val):
print(val)
def print_r(val):
print(val)
def var_dump(array):
print(array)
def json_encode(array):
try:
import json
except:
print()
try:
return json.dumps(array)
except:
raise ValueError ("Failed To Encode Json")
def json_decode(jsonEn):
try:
import json
except:
print()
try:
return json.loads(jsonEn)
except:
raise ValueError ("Failed To Decode Json")
def base64encode(string):
try:
import base64
except:
print()
return base64.b64encode(str(string).encode()).decode()
def base64decode(encoded):
try:
import base64
except:
print()
return base64.b64decode(encoded).decode()
def read(file):
try:
return open(file,'r').read()
except:
raise FileNotFoundError ("File Not Found")
def scandir(dir):
try:
import os
except:
print()
# os.scandir yields DirEntry objects; PHP's scandir returns the entry names
return [entry.name for entry in os.scandir(dir)]
def substr(string,start,end=''):
string = str(string)
if(end == ''):
return string[int(start):]
else:
return string[int(start):int(end)]
def unlink(path):
try:
import os
except:
print()
try:
os.remove(path)
return True
except:
return False
def urlencode(url):
try:
import urllib.parse
except:
print()
return urllib.parse.quote(url)
def urldecode(enurl):
try:
import urllib.parse
except:
print()
return urllib.parse.unquote(enurl)
def bin2hex(text):
return ''.join(format(ord(c), '02x') for c in text)  # zero-pad each byte like PHP's bin2hex
def decbin(num):
"""
Converts a decimal number(num) to binary
Parameters
----------
num : int or string
Returns
-------
string containing the binary representation of the number.
"""
try:
return bin(int(num))[2:]
except:
raise ValueError("Expected a Number as input")
def decoct(num):
"""
Converts a decimal number(num) to octal i.e. base 8.
Parameters
----------
num : int or string
Returns
-------
string containing the octal representation of the number.
"""
try:
return oct(int(num))[2:]
except:
raise ValueError("Expected a Number as input")
def dechex(num):
"""
Converts a decimal number(num) to hexadecimal ie base 16
Parameters
----------
num : int or string
Returns
-------
string containing the hexadecimal representation of the number.
"""
try:
# returning upper because php does that.
return hex(int(num))[2:].upper()
except:
raise ValueError("Expected a Number as input")
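# Worked examples for the three conversions above (values chosen purely for
# illustration): decbin(10) -> '1010', decoct(10) -> '12', dechex(255) -> 'FF'.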
def str_replace(string, value, new_value, occurences = -1):
"""
Replaces the value of `value` to `new_value` in `string`.
If occurences is defined, will only replace the first n occurences. A negative value replaces all values.
"""
return string.replace(value, new_value, occurences)
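# For instance (illustrative values): str_replace('banana', 'a', 'o') gives
# 'bonono', while str_replace('banana', 'a', 'o', 1) gives 'bonana'.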
def trim(string, trim_chars = None):
"""
Strips all whitespace characters from the beginning and end of the `string`. If `trim_chars` is set, those characters are stripped instead of whitespace.
"""
return string.strip(trim_chars)
def strpos(string, search_val, offset = 0):
"""
Returns the position of `search_val` in `string`, or False if it doesn't exist. If `offset` is defined, will start looking if `search_val` exists after the `offset`.
"""
try:
return string[offset:].index(search_val) + offset
except ValueError:
return False
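# Illustrative values: strpos('hello world', 'o') -> 4,
# strpos('hello world', 'o', 5) -> 7, strpos('hello world', 'z') -> False.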
def strstr(string, search_val):
"""
Searches string for `search_val` and if found, returns all characters from there onwards. Returns false if `search_val` is not found.
"""
str_index = strpos(string, search_val)
if not str_index: return False
return string[str_index:]
def is_string(val):
return type(val) == str
def is_array(val):
return type(val) == list
def in_array(l, val):
return l in val
def array_unique(l):
"""
Removes all duplicates from `l`.
"""
return list(set(l))
def array_search(l, val):
"""
Returns the index of `val` in `l`.
"""
try:
return l.index(val)
except ValueError:
return False
def array_reverse(l):
"""
Reverses an array. PHP's array_reverse() allows you to preserve the old keys, but that doesn't work in python.
"""
return list(reversed(l))
def array_map(func, l):
return list(map(func, l))
def array_diff(l, *other_arrays):
"""
Removes all elements from `l` that are present in at least one of the arrays in `other_arrays`.
"""
for a in other_arrays:
l = [x for x in l if x not in a]
return l
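# Example with illustrative values: array_diff([1, 2, 3, 4], [2], [4]) keeps
# only the elements absent from every other array, i.e. [1, 3].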
|
#!/usr/bin/env python
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import webapp2
from google.appengine.api import taskqueue
from google.appengine.ext import db
from models import Test
from models import PersistentCache
def cache_manifest(cache):
PersistentCache.set_cache('manifest', cache)
def schedule_manifest_update():
taskqueue.add(url='/api/test/update')
class CachedManifestHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'application/json'
manifest = PersistentCache.get_cache('manifest')
if manifest:
self.response.out.write(manifest)
else:
schedule_manifest_update()
def cache_dashboard(cache):
PersistentCache.set_cache('dashboard', cache)
def schedule_dashboard_update():
taskqueue.add(url='/api/test/dashboard/update')
class CachedDashboardHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'application/json'
dashboard = PersistentCache.get_cache('dashboard')
if dashboard:
self.response.out.write(dashboard)
else:
schedule_dashboard_update()
def cache_runs(test_id, branch_id, platform_id, cache):
PersistentCache.set_cache(Test.cache_key(test_id, branch_id, platform_id), cache)
def schedule_runs_update(test_id, branch_id, platform_id):
taskqueue.add(url='/api/test/runs/update', params={'id': test_id, 'branchid': branch_id, 'platformid': platform_id})
class CachedRunsHandler(webapp2.RequestHandler):
def get(self):
self.response.headers['Content-Type'] = 'application/json'
try:
test_id = int(self.request.get('id', 0))
branch_id = int(self.request.get('branchid', 0))
platform_id = int(self.request.get('platformid', 0))
except TypeError:
# FIXME: Output an error here
test_id = 0
branch_id = 0
platform_id = 0
runs = PersistentCache.get_cache(Test.cache_key(test_id, branch_id, platform_id))
if runs:
self.response.out.write(runs)
else:
schedule_runs_update(test_id, branch_id, platform_id)
def schedule_report_process(log):
taskqueue.add(url='/api/test/report/process', params={'id': log.key().id()})
|
"""Core views."""
from .admin import (
check_top_notifications, information, logs, logs_page, parameters,
viewsettings
)
from .auth import (
PasswordResetView, dologin, dologout, VerifySMSCodeView,
ResendSMSCodeView, TwoFactorCodeVerifyView
)
from .base import RootDispatchView
from .dashboard import DashboardView
from .user import (
api_access, index, preferences, profile, security)
__all__ = [
"DashboardView",
"PasswordResetView",
"ResendSMSCodeView",
"RootDispatchView",
"VerifySMSCodeView",
"api_access",
"check_top_notifications",
"dologin",
"dologout",
"TwoFactorCodeVerifyView",
"index",
"information",
"logs",
"logs_page",
"parameters",
"preferences",
"profile",
"viewsettings",
"security"
]
|
#!/usr/bin/python3
import argparse
import sys
# from cortx.utils.conf_store import Conf
# from cortx.utils.process import SimpleProcess
# NOTE: used pyyaml and subprocess since cortx-utils is
# not installed on the container.
import yaml
import subprocess
import os
def get_local(config_url):
# Conf.load('Config', config_url)
# return Conf.get('Config', 'cortx>common>storage>log')
# NOTE: using pyyaml since cortx-utils is not installed on the container.
log_dir = None
with open(config_url) as f:
conf = yaml.load(f, Loader=yaml.loader.SafeLoader)
log_dir = (conf["cortx"]["common"]["storage"]["log"])
return log_dir
def run_command(command):
"""Run the command and get the response and error returned"""
process = subprocess.Popen(
command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
response, error = process.communicate()
return response.decode().rstrip('\n'), error.decode().rstrip('\n')
def setup_cron_job(local):
cron_schedule = "* * * * *" # every minute
rollover_script_cmd = \
"/usr/bin/python3 /opt/cortx/component/logrotate/cron/log_rollover.py"
log_dir = os.path.join(local, "component")
with open("/etc/cron.d/cron_entries", 'a') as f:
f.write(f"{cron_schedule} {rollover_script_cmd} --logpath {log_dir}\n")
# _, err, retcode = SimpleProcess("crontab /etc/cron.d/cron_entries").run()
# NOTE: using run_command since cortx-utils is not installed on the container.
_, err = run_command("crontab /etc/cron.d/cron_entries")
if err:
sys.stderr.write(err)
exit(1)
def usage():
""" Print usage instructions """
sys.stderr.write(
"usage: python3 startup.py --config <url>\n"
"where:\n"
"url Config URL\n")
if __name__ == "__main__":
argParser = argparse.ArgumentParser()
argParser.add_argument(
'-c', '--config',
help="Config url to read configurations values."
)
args = argParser.parse_args()
if not args.config:
usage()
exit(1)
local = get_local(args.config)
setup_cron_job(local)
|
#!/usr/bin/python
# Orthanc - A Lightweight, RESTful DICOM Store
# Copyright (C) 2012-2016 Sebastien Jodogne, Medical Physics
# Department, University Hospital of Liege, Belgium
# Copyright (C) 2017-2020 Osimis S.A., Belgium
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# sudo pip install freetype-py
import freetype
import json
import os
import sys
import unicodedata
if len(sys.argv) != 3:
print('Usage: %s <Font> <Size>\n' % sys.argv[0])
print('Example: %s /usr/share/fonts/truetype/ubuntu-font-family/UbuntuMono-B.ttf 16\n' % sys.argv[0])
sys.exit(-1)
FONT = sys.argv[1]
PIXEL_SIZE = int(sys.argv[2])
CHARSET = 'latin-1'
# Load the font
face = freetype.Face(FONT)
face.set_char_size(PIXEL_SIZE * 64)
# Generate all the characters between 0 and 255
characters = ''.join(map(chr, range(0, 256)))
# Interpret the string using the required charset
characters = characters.decode(CHARSET, 'ignore')
# Keep only non-control characters
characters = filter(lambda c: unicodedata.category(c)[0] != 'C', characters)
font = {
'Name' : os.path.basename(FONT),
'Size' : PIXEL_SIZE,
'Characters' : {}
}
def PrintCharacter(c):
pos = 0
for i in range(c['Height']):
s = ''
for j in range(c['Width']):
if c['Bitmap'][pos] > 127:
s += '*'
else:
s += ' '
pos += 1
print s
for c in characters:
face.load_char(c)
info = {
'Width' : face.glyph.bitmap.width,
'Height' : face.glyph.bitmap.rows,
'Advance' : face.glyph.metrics.horiAdvance / 64,
'Top' : -face.glyph.metrics.horiBearingY / 64,
'Bitmap' : face.glyph.bitmap.buffer,
}
font['Characters'][ord(c)] = info
#PrintCharacter(info)
minTop = min(map(lambda (k, v): v['Top'], font['Characters'].iteritems()))
for c in font['Characters']:
font['Characters'][c]['Top'] -= minTop
font['MaxAdvance'] = max(map(lambda (k, v): v['Advance'], font['Characters'].iteritems()))
font['MaxHeight'] = max(map(lambda (k, v): v['Height'], font['Characters'].iteritems()))
print json.dumps(font)
|
# back tracking problem
|
# -*- coding: utf-8 -*-
'''
Tools for loading data from mne's fiff files.
.. autosummary::
:toctree: generated
events
add_epochs
add_mne_epochs
epochs
mne_epochs
Converting mne objects to :class:`NDVar`:
.. autosummary::
:toctree: generated
epochs_ndvar
evoked_ndvar
stc_ndvar
.. currentmodule:: eelbrain
Managing events with a :class:`Dataset`
---------------------------------------
To load events as :class:`Dataset`::
>>> ds = load.fiff.events(raw_file_path)
By default, the :class:`Dataset` contains a variable called ``"trigger"``
with trigger values, and a variable called ``"i_start"`` with the indices of
the events::
>>> print ds[:10]
trigger i_start
-----------------
2 27977
3 28345
1 28771
4 29219
2 29652
3 30025
1 30450
4 30839
2 31240
3 31665
These events can be modified in ``ds`` (discarding events, modifying
``i_start``) before being used to load data epochs.
Epochs can be loaded as :class:`NDVar` with :func:`load.fiff.epochs`. Epochs
will be loaded based only on the ``"i_start"`` variable, so any modification
to this variable will affect the epochs that are loaded.::
>>> ds['epochs'] = load.fiff.epochs(ds)
:class:`mne.Epochs` can be loaded with::
>>> mne_epochs = load.fiff.mne_epochs(ds)
Note that the returned ``mne_epochs`` object does not contain meaningful event
ids, and ``mne_epochs.event_id`` is None.
Using Threshold Rejection
-------------------------
In case threshold rejection is used, the number of epochs returned by
``load.fiff.epochs(ds, reject=reject_options)`` might not be the same as the
number of events in ``ds`` (whenever epochs are rejected). For those cases,
:func:`load.fiff.add_epochs` will automatically resize the :class:`Dataset`::
>>> epoch_ds = load.fiff.add_epochs(ds, -0.1, 0.6, reject=reject_options)
The returned ``epoch_ds`` will contain the epochs as NDVar as ``ds['meg']``.
If no epochs got rejected during loading, the length of ``epoch_ds`` is
identical with the input ``ds``. If epochs were rejected, ``epoch_ds`` is a
shorter copy of the original ``ds``.
:class:`mne.Epochs` can be added to ``ds`` in the same fashion with::
>>> ds = load.fiff.add_mne_epochs(ds, -0.1, 0.6, reject=reject_options)
Separate events files
---------------------
If events are stored separately from the raw files, they can be loaded in
:func:`load.fiff.events` by supplying the path to the events file as the
``events`` parameter::
>>> ds = load.fiff.events(raw_file_path, events=events_file_path)
'''
from __future__ import division
import fnmatch
from itertools import izip_longest
import os
import numpy as np
import mne
from mne.source_estimate import _BaseSourceEstimate
from mne.io.constants import FIFF
from mne.io import Raw as _mne_Raw
from mne.io import read_raw_kit as _mne_read_raw_kit
from .. import _colorspaces as _cs
from .._info import BAD_CHANNELS
from .._utils import ui, logger
from .._data_obj import Var, NDVar, Dataset, Sensor, SourceSpace, UTS, \
_matrix_graph
def mne_raw(path=None, proj=False, **kwargs):
"""
Returns a mne Raw object with added projections if appropriate.
Parameters
----------
path : None | str(path)
        path to the raw fiff file. If ``None``, a file can be chosen from a
        file dialog.
proj : bool | str(path)
Add projections from a separate file to the Raw object.
**``False``**: No proj file will be added.
**``True``**: ``'{raw}*proj.fif'`` will be used.
``'{raw}'`` will be replaced with the raw file's path minus '_raw.fif',
and '*' will be expanded using fnmatch. If multiple files match the
pattern, a ValueError will be raised.
**``str``**: A custom path template can be provided, ``'{raw}'`` and
``'*'`` will be treated as with ``True``.
kwargs
Additional keyword arguments are forwarded to mne Raw initialization.
"""
if path is None:
path = ui.ask_file("Pick a Raw Fiff File", "Pick a Raw Fiff File",
[('Functional image file (*.fif)', '*.fif'),
                            ('KIT Raw File (*.sqd,*.con)', '*.sqd;*.con')])
if not path:
return
if not os.path.isfile(path):
raise IOError("%r is not a file" % path)
if isinstance(path, basestring):
_, ext = os.path.splitext(path)
if ext.startswith('.fif'):
raw = _mne_Raw(path, **kwargs)
elif ext in ('.sqd', '.con'):
raw = _mne_read_raw_kit(path, **kwargs)
else:
raise ValueError("Unknown extension: %r" % ext)
else:
raw = _mne_Raw(path, **kwargs)
if proj:
if proj is True:
proj = '{raw}*proj.fif'
if '{raw}' in proj:
raw_file = raw.info['filename']
raw_root, _ = os.path.splitext(raw_file)
raw_root = raw_root.rstrip('raw')
proj = proj.format(raw=raw_root)
if '*' in proj:
head, tail = os.path.split(proj)
names = fnmatch.filter(os.listdir(head), tail)
if len(names) == 1:
proj = os.path.join(head, names[0])
else:
if len(names) == 0:
err = "No file matching %r"
else:
err = "Multiple files matching %r"
raise ValueError(err % proj)
# add the projections to the raw file
proj = mne.read_proj(proj)
raw.add_proj(proj, remove_existing=True)
return raw
def events(raw=None, merge=-1, proj=False, name=None, bads=None,
stim_channel=None, events=None, **kwargs):
"""
Load events from a raw fiff file.
Parameters
----------
raw : str(path) | None | mne Raw
The raw fiff file from which to extract events (if raw and events are
both ``None``, a file dialog will be displayed to select a raw file).
merge : int
Merge steps occurring in neighboring samples. The integer value
indicates over how many samples events should be merged, and the sign
indicates in which direction they should be merged (negative means
towards the earlier event, positive towards the later event).
proj : bool | str
Path to the projections file that will be loaded with the raw file.
``'{raw}'`` will be expanded to the raw file's path minus extension.
        With ``proj=True``, ``'{raw}*proj.fif'`` will be used,
looking for any projection file starting with the raw file's name.
If multiple files match the pattern, a ValueError will be raised.
name : str | None
A name for the Dataset. If ``None``, the raw filename will be used.
bads : None | list
Specify additional bad channels in the raw data file (these are added
to the ones that are already defined in the raw file).
stim_channel : None | string | list of string
Name of the stim channel or all the stim channels
affected by the trigger. If None, the config variables
'MNE_STIM_CHANNEL', 'MNE_STIM_CHANNEL_1', 'MNE_STIM_CHANNEL_2',
etc. are read. If these are not found, it will default to
'STI 014'.
events : None | str
If events are stored in a fiff file separate from the Raw object, the
path to the events file can be supplied here. The events in the Dataset
        will reflect the events in the events file rather than the raw file.
others :
Keyword arguments for loading the raw file.
Returns
-------
events : Dataset
A Dataset with the following variables:
- *i_start*: the index of the event in the raw file.
- *trigger*: the event value.
The Dataset's info dictionary contains the following values:
- *raw*: the mne Raw object.
"""
if (raw is None and events is None) or isinstance(raw, basestring):
raw = mne_raw(raw, proj=proj, **kwargs)
    if bads is not None and raw is not None:
raw.info['bads'].extend(bads)
if name is None and raw is not None:
raw_path = raw.info['filename']
if isinstance(raw_path, basestring):
name = os.path.basename(raw_path)
else:
name = None
if events is None:
evts = mne.find_stim_steps(raw, merge=merge, stim_channel=stim_channel)
idx = np.nonzero(evts[:, 2])
evts = evts[idx]
else:
evts = mne.read_events(events)
if len(evts) == 0:
raise ValueError("No events found!")
i_start = Var(evts[:, 0], name='i_start')
trigger = Var(evts[:, 2], name='trigger')
info = {'raw': raw}
return Dataset((trigger, i_start), name, info=info)
def _guess_ndvar_data_type(info):
"""Guess which type of data to extract from an mne object.
Checks for the presence of channels in that order: "mag", "eeg", "grad".
If none are found, a ValueError is raised.
Parameters
----------
info : dict
MNE info dictionary.
Returns
-------
data : str
Kind of data to extract
"""
for ch in info['chs']:
kind = ch['kind']
if kind == FIFF.FIFFV_MEG_CH:
if ch['unit'] == FIFF.FIFF_UNIT_T_M:
return 'grad'
elif ch['unit'] == FIFF.FIFF_UNIT_T:
return 'mag'
elif kind == FIFF.FIFFV_EEG_CH:
return 'eeg'
raise ValueError("No MEG or EEG channel found in info.")
def _picks(info, data, exclude):
if data == 'eeg':
meg = False
eeg = True
eog = False
elif data == 'eeg&eog':
meg = False
eeg = True
eog = True
elif data in ['grad', 'mag']:
meg = data
eeg = False
eog = False
else:
err = "data=%r (needs to be 'eeg', 'grad' or 'mag')" % data
raise ValueError(err)
picks = mne.pick_types(info, meg, eeg, False, eog, ref_meg=False,
exclude=exclude)
return picks
def _ndvar_epochs_reject(data, reject):
if reject:
if not np.isscalar(reject):
err = ("Reject must be scalar (rejection threshold); got %s." %
repr(reject))
raise ValueError(err)
reject = {data: reject}
else:
reject = None
return reject
def epochs(ds, tmin=-0.1, tmax=0.6, baseline=None, decim=1, mult=1, proj=False,
data='mag', reject=None, exclude='bads', info=None, name=None,
raw=None, sensors=None, i_start='i_start'):
"""
Load epochs as :class:`NDVar`.
Parameters
----------
ds : Dataset
Dataset containing a variable which defines epoch cues (i_start).
tmin, tmax : scalar
First and last sample to include in the epochs in seconds.
baseline : tuple(tmin, tmax) | ``None``
Time interval for baseline correction. Tmin/tmax in seconds, or None to
use all the data (e.g., ``(None, 0)`` uses all the data from the
beginning of the epoch up to t=0). ``baseline=None`` for no baseline
correction (default).
decim : int
Downsample the data by this factor when importing. ``1`` means no
downsampling. Note that this function does not low-pass filter
the data. The data is downsampled by picking out every
n-th sample (see `Wikipedia <http://en.wikipedia.org/wiki/Downsampling>`_).
mult : scalar
multiply all data by a constant.
proj : bool
mne.Epochs kwarg (subtract projections when loading data)
data : 'eeg' | 'mag' | 'grad'
The kind of data to load.
reject : None | scalar
        Threshold for rejecting epochs (peak to peak). Requires a fork of
        mne-python which implements the ``Epochs.model['index']`` variable.
exclude : list of string | str
Channels to exclude (:func:`mne.pick_types` kwarg).
If 'bads' (default), exclude channels in info['bads'].
If empty do not exclude any.
info : None | dict
Entries for the ndvar's info dict.
name : str
name for the new NDVar.
raw : None | mne Raw
Raw file providing the data; if ``None``, ``ds.info['raw']`` is used.
sensors : None | Sensor
The default (``None``) reads the sensor locations from the fiff file.
If the fiff file contains incorrect sensor locations, a different
Sensor instance can be supplied through this kwarg.
i_start : str
name of the variable containing the index of the events.
Returns
-------
epochs : NDVar
The epochs as NDVar object.
"""
if raw is None:
raw = ds.info['raw']
picks = _picks(raw.info, data, exclude)
reject = _ndvar_epochs_reject(data, reject)
epochs_ = mne_epochs(ds, tmin, tmax, baseline, i_start, raw, decim=decim,
picks=picks, reject=reject, proj=proj)
ndvar = epochs_ndvar(epochs_, name, data, mult=mult, info=info,
sensors=sensors)
if len(epochs_) == 0:
raise RuntimeError("No events left in %r" % raw.info['filename'])
return ndvar
def add_epochs(ds, tmin=-0.1, tmax=0.6, baseline=None, decim=1, mult=1,
proj=False, data='mag', reject=None, exclude='bads', info=None,
name="meg", raw=None, sensors=None, i_start='i_start',
sysname=None):
"""
Load epochs and add them to a dataset as :class:`NDVar`.
Unless the ``reject`` argument is specified, ``ds``
is modified in place. With ``reject``, a subset of ``ds`` is returned
containing only those events for which data was loaded.
Parameters
----------
ds : Dataset
Dataset containing a variable which defines epoch cues (i_start) and to
which the epochs are added.
tmin, tmax : scalar
First and last sample to include in the epochs in seconds.
baseline : tuple(tmin, tmax) | ``None``
Time interval for baseline correction. Tmin/tmax in seconds, or None to
use all the data (e.g., ``(None, 0)`` uses all the data from the
beginning of the epoch up to t=0). ``baseline=None`` for no baseline
correction (default).
decim : int
Downsample the data by this factor when importing. ``1`` means no
downsampling. Note that this function does not low-pass filter
the data. The data is downsampled by picking out every
n-th sample (see `Wikipedia <http://en.wikipedia.org/wiki/Downsampling>`_).
mult : scalar
multiply all data by a constant.
proj : bool
mne.Epochs kwarg (subtract projections when loading data)
data : 'eeg' | 'mag' | 'grad'
The kind of data to load.
reject : None | scalar
        Threshold for rejecting epochs (peak to peak). Requires a fork of
        mne-python which implements the ``Epochs.model['index']`` variable.
exclude : list of string | str
Channels to exclude (:func:`mne.pick_types` kwarg).
If 'bads' (default), exclude channels in info['bads'].
If empty do not exclude any.
info : None | dict
Entries for the ndvar's info dict.
name : str
name for the new NDVar.
raw : None | mne Raw
Raw file providing the data; if ``None``, ``ds.info['raw']`` is used.
sensors : None | Sensor
The default (``None``) reads the sensor locations from the fiff file.
If the fiff file contains incorrect sensor locations, a different
Sensor instance can be supplied through this kwarg.
i_start : str
name of the variable containing the index of the events.
sysname : str
Name of the sensor system (used to load sensor connectivity).
Returns
-------
ds : Dataset
Dataset containing the epochs. If no events are rejected, ``ds`` is the
same object as the input ``ds`` argument, otherwise a copy of it.
"""
if raw is None:
raw = ds.info['raw']
picks = _picks(raw.info, data, exclude)
reject = _ndvar_epochs_reject(data, reject)
epochs_ = mne_epochs(ds, tmin, tmax, baseline, i_start, raw, decim=decim,
picks=picks, reject=reject, proj=proj, preload=True)
ds = _trim_ds(ds, epochs_)
ds[name] = epochs_ndvar(epochs_, name, data, mult=mult, info=info,
sensors=sensors, sysname=sysname)
return ds
def add_mne_epochs(ds, tmin=-0.1, tmax=0.6, baseline=None, target='epochs',
**kwargs):
"""
Load epochs and add them to a dataset as :class:`mne.Epochs`.
If, after loading, the Epochs contain fewer cases than the Dataset, a copy
of the Dataset is made containing only those events also contained in the
Epochs. Note that the Epochs are always loaded with ``preload==True``.
If the Dataset's info dictionary contains a 'bad_channels' entry, those bad
channels are added to the epochs.
Parameters
----------
ds : Dataset
Dataset with events from a raw fiff file (i.e., created by
load.fiff.events).
tmin, tmax : scalar
First and last sample to include in the epochs in seconds.
baseline : tuple(tmin, tmax) | ``None``
Time interval for baseline correction. Tmin/tmax in seconds, or None to
use all the data (e.g., ``(None, 0)`` uses all the data from the
beginning of the epoch up to t=0). ``baseline=None`` for no baseline
correction (default).
target : str
Name for the Epochs object in the Dataset.
*others* :
Any additional keyword arguments are forwarded to the mne Epochs
object initialization.
"""
kwargs['preload'] = True
epochs_ = mne_epochs(ds, tmin, tmax, baseline, **kwargs)
ds = _trim_ds(ds, epochs_)
ds[target] = epochs_
return ds
def _mne_events(ds=None, i_start='i_start', trigger='trigger'):
"""
Convert events from a Dataset into mne events.
"""
if isinstance(i_start, basestring):
i_start = ds[i_start]
N = len(i_start)
if isinstance(trigger, basestring):
trigger = ds[trigger]
elif trigger is None:
trigger = np.ones(N)
events = np.empty((N, 3), dtype=np.int32)
events[:, 0] = i_start.x
events[:, 1] = 0
events[:, 2] = trigger
return events
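# For illustration (hypothetical values): a Dataset with i_start == [10, 20]
# and trigger == [1, 2] yields np.array([[10, 0, 1], [20, 0, 2]], dtype=int32).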
def mne_epochs(ds, tmin=-0.1, tmax=0.6, baseline=None, i_start='i_start',
raw=None, drop_bad_chs=True, **kwargs):
"""
Load epochs as :class:`mne.Epochs`.
Parameters
----------
ds : Dataset
Dataset containing a variable which defines epoch cues (i_start).
tmin, tmax : scalar
First and last sample to include in the epochs in seconds.
baseline : tuple(tmin, tmax) | ``None``
Time interval for baseline correction. Tmin/tmax in seconds, or None to
use all the data (e.g., ``(None, 0)`` uses all the data from the
beginning of the epoch up to t=0). ``baseline=None`` for no baseline
correction (default).
i_start : str
name of the variable containing the index of the events.
raw : None | mne Raw
If None, ds.info['raw'] is used.
drop_bad_chs : bool
        Drop all channels in raw.info['bads'] from the Epochs. This argument is
ignored if the picks argument is specified.
kwargs
:class:`mne.Epochs` parameters.
"""
if baseline is False:
baseline = None
if raw is None:
raw = ds.info['raw']
if drop_bad_chs and ('picks' not in kwargs) and raw.info['bads']:
kwargs['picks'] = mne.pick_types(raw.info, eeg=True, eog=True, ref_meg=False)
events = _mne_events(ds=ds, i_start=i_start)
# epochs with (event_id == None) does not use columns 1 and 2 of events
events[:, 1] = np.arange(len(events))
epochs = mne.Epochs(raw, events, None, tmin, tmax, baseline, **kwargs)
if kwargs.get('reject', None) is None and len(epochs) != len(events):
logger.warn("%s: MNE generated fewer Epochs than there are events. "
"The raw file might end before the end of the last epoch."
% raw.info['filename'])
# add bad channels from ds
if BAD_CHANNELS in ds.info:
invalid = []
for ch_name in ds.info[BAD_CHANNELS]:
if ch_name not in epochs.ch_names:
invalid.append(ch_name)
elif ch_name not in epochs.info['bads']:
epochs.info['bads'].append(ch_name)
if invalid:
            suffix = 's' if len(invalid) > 1 else ''
raise ValueError("Invalid channel%s in ds.info[%r]: %s"
% (suffix, BAD_CHANNELS, ', '.join(invalid)))
return epochs
def sensor_dim(fiff, picks=None, sysname=None):
"""
Create a Sensor dimension object based on the info in a fiff object.
Parameters
----------
fiff : mne-python object
Object that has a .info attribute that contains measurement info.
picks : None | array of int
Channel picks (as used in mne-python). If None (default) all channels
are included.
sysname : str
Name of the sensor system (used to load sensor connectivity).
Returns
-------
sensor_dim : Sensor
Sensor dimension object.
"""
info = fiff.info
if picks is None:
chs = info['chs']
else:
chs = [info['chs'][i] for i in picks]
ch_locs = []
ch_names = []
for ch in chs:
x, y, z = ch['loc'][:3]
ch_name = ch['ch_name']
ch_locs.append((x, y, z))
ch_names.append(ch_name)
if sysname:
c_matrix, names = mne.channels.read_ch_connectivity(sysname)
# fix channel names
if sysname.startswith('neuromag'):
names = [n[:3] + ' ' + n[3:] for n in names]
# fix channel order
if names != ch_names:
index = np.array([names.index(name) for name in ch_names])
c_matrix = c_matrix[index][:, index]
conn = _matrix_graph(c_matrix)
else:
conn = None
return Sensor(ch_locs, ch_names, sysname=sysname, connectivity=conn)
def epochs_ndvar(epochs, name='meg', data=None, exclude='bads', mult=1,
info=None, sensors=None, vmax=None, sysname=None):
"""
Convert an :class:`mne.Epochs` object to an :class:`NDVar`.
Parameters
----------
epochs : mne.Epochs | str
The epochs object or path to an epochs FIFF file.
name : None | str
Name for the NDVar.
data : 'eeg' | 'mag' | 'grad' | None
The kind of data to include. If None (default) based on ``epochs.info``.
exclude : list of string | str
Channels to exclude (:func:`mne.pick_types` kwarg).
If 'bads' (default), exclude channels in info['bads'].
If empty do not exclude any.
mult : scalar
multiply all data by a constant.
info : None | dict
Additional contents for the info dictionary of the NDVar.
sensors : None | Sensor
The default (``None``) reads the sensor locations from the fiff file.
If the fiff file contains incorrect sensor locations, a different
Sensor can be supplied through this kwarg.
vmax : None | scalar
Set a default range for plotting.
sysname : str
Name of the sensor system (used to load sensor connectivity).
"""
if isinstance(epochs, basestring):
epochs = mne.read_epochs(epochs)
if data is None:
data = _guess_ndvar_data_type(epochs.info)
if data == 'eeg' or data == 'eeg&eog':
info_ = _cs.eeg_info(vmax, mult)
summary_vmax = 0.1 * vmax if vmax else None
summary_info = _cs.eeg_info(summary_vmax, mult)
elif data == 'mag':
info_ = _cs.meg_info(vmax, mult)
summary_vmax = 0.1 * vmax if vmax else None
summary_info = _cs.meg_info(summary_vmax, mult)
elif data == 'grad':
info_ = _cs.meg_info(vmax, mult, 'T/cm', u'∆U')
summary_vmax = 0.1 * vmax if vmax else None
summary_info = _cs.meg_info(summary_vmax, mult, 'T/cm', u'∆U')
else:
raise ValueError("data=%r" % data)
info_.update(proj='z root', samplingrate=epochs.info['sfreq'],
summary_info=summary_info)
if info:
info_.update(info)
x = epochs.get_data()
picks = _picks(epochs.info, data, exclude)
if len(picks) < x.shape[1]:
x = x[:, picks]
if mult != 1:
x *= mult
sensor = sensors or sensor_dim(epochs, picks, sysname)
time = UTS(epochs.times[0], 1. / epochs.info['sfreq'], len(epochs.times))
return NDVar(x, ('case', sensor, time), info=info_, name=name)
def evoked_ndvar(evoked, name='meg', data=None, exclude='bads', vmax=None,
sysname=None):
"""
Convert one or more mne :class:`Evoked` objects to an :class:`NDVar`.
Parameters
----------
evoked : str | Evoked | list of Evoked
The Evoked to convert to NDVar. Can be a string designating a file
        path to an evoked fiff file containing only one evoked.
name : str
Name of the NDVar.
data : 'eeg' | 'mag' | 'grad' | None
        The kind of data to include. If None (default) based on ``evoked.info``.
exclude : list of string | string
Channels to exclude (:func:`mne.pick_types` kwarg).
If 'bads' (default), exclude channels in info['bads'].
If empty do not exclude any.
vmax : None | scalar
Set a default range for plotting.
sysname : str
Name of the sensor system (used to load sensor connectivity).
Notes
-----
If evoked objects have different channels, the intersection is used (i.e.,
only the channels present in all objects are retained).
"""
if isinstance(evoked, basestring):
evoked = mne.Evoked(evoked)
if data is None:
if isinstance(evoked, (tuple, list)):
data_set = {_guess_ndvar_data_type(e.info) for e in evoked}
if len(data_set) > 1:
raise ValueError("Different Evoked objects contain different "
"data types: %s" % ', '.join(data_set))
data = data_set.pop()
else:
data = _guess_ndvar_data_type(evoked.info)
if data == 'mag':
info = _cs.meg_info(vmax)
elif data == 'eeg':
info = _cs.eeg_info(vmax)
elif data == 'grad':
info = _cs.meg_info(vmax, unit='T/cm')
else:
raise ValueError("data=%s" % repr(data))
if isinstance(evoked, mne.Evoked):
picks = _picks(evoked.info, data, exclude)
x = evoked.data[picks]
sensor = sensor_dim(evoked, picks, sysname)
time = UTS.from_int(evoked.first, evoked.last, evoked.info['sfreq'])
dims = (sensor, time)
else:
e0 = evoked[0]
# find common channels
all_chs = set(e0.info['ch_names'])
exclude = set(e0.info['bads'])
times = e0.times
for e in evoked[1:]:
chs = set(e.info['ch_names'])
all_chs.update(chs)
exclude.update(e.info['bads'])
missing = all_chs.difference(chs)
exclude.update(missing)
if not np.all(e.times == times):
raise ValueError("Not all evoked have the same time points.")
# get data
x = []
sensor = None
exclude = list(exclude)
for e in evoked:
picks = _picks(e.info, data, exclude)
x.append(e.data[picks])
if sensor is None:
sensor = sensor_dim(e, picks, sysname)
time = UTS.from_int(e0.first, e0.last, e0.info['sfreq'])
dims = ('case', sensor, time)
return NDVar(x, dims, info=info, name=name)
def stc_ndvar(stc, subject, src, subjects_dir=None, method=None, fixed=None,
name=None, check=True, parc='aparc'):
"""
Convert one or more :class:`mne.SourceEstimate` objects to an :class:`NDVar`.
Parameters
----------
stc : SourceEstimate | list of SourceEstimates | str
The source estimate object(s) or a path to an stc file.
subject : str
MRI subject (used for loading MRI in PySurfer plotting)
src : str
The kind of source space used (e.g., 'ico-4').
subjects_dir : None | str
The path to the subjects_dir (needed to locate the source space
file).
method : 'MNE' | 'dSPM' | 'sLORETA'
Source estimation method (optional, used for generating info).
fixed : bool
Source estimation orientation constraint (optional, used for generating
info).
name : str | None
Ndvar name.
check : bool
If multiple stcs are provided, check if all stcs have the same times
and vertices.
parc : None | str
Parcellation to add to the source space.
"""
subjects_dir = mne.utils.get_subjects_dir(subjects_dir)
if isinstance(stc, basestring):
stc = mne.read_source_estimate(stc)
# construct data array
if isinstance(stc, _BaseSourceEstimate):
case = False
x = stc.data
else:
case = True
stcs = stc
stc = stcs[0]
if check:
times = stc.times
vertices = stc.vertices
for stc_ in stcs[1:]:
assert np.array_equal(stc_.times, times)
for v1, v0 in izip_longest(stc_.vertices, vertices):
assert np.array_equal(v1, v0)
x = np.array([s.data for s in stcs])
# Construct NDVar Dimensions
time = UTS(stc.tmin, stc.tstep, stc.shape[1])
if isinstance(stc, mne.VolSourceEstimate):
ss = SourceSpace([stc.vertices], subject, src, subjects_dir, parc)
else:
ss = SourceSpace(stc.vertices, subject, src, subjects_dir, parc)
if case:
dims = ('case', ss, time)
else:
dims = (ss, time)
# find the right measurement info
info = {}
if fixed is False:
info['meas'] = 'Activation'
if method == 'MNE' or method == 'dSPM' or method == 'sLORETA':
info['unit'] = method
elif method is not None:
raise ValueError("method=%s" % repr(method))
elif fixed is True:
info['meas'] = 'Current Estimate'
if method == 'MNE':
info['unit'] = 'Am'
elif method == 'dSPM' or method == 'sLORETA':
info['unit'] = '%s(Am)' % method
elif method is not None:
raise ValueError("method=%s" % repr(method))
elif fixed is not None:
raise ValueError("fixed=%s" % repr(fixed))
return NDVar(x, dims, info, name)
def _trim_ds(ds, epochs):
"""
Trim a Dataset to account for rejected epochs. If no epochs were rejected,
    the original ds is returned.
Parameters
----------
ds : Dataset
Dataset that was used to construct epochs.
epochs : Epochs
Epochs loaded with mne_epochs()
"""
if len(epochs) < ds.n_cases:
index = epochs.events[:, 1]
ds = ds.sub(index)
return ds
|
# coding: utf-8
# Standard Python libraries
from io import IOBase
from pathlib import Path
from typing import Optional, Union
import numpy as np
# https://github.com/usnistgov/atomman
import atomman as am
import atomman.unitconvert as uc
# https://github.com/usnistgov/DataModelDict
from DataModelDict import DataModelDict as DM
# iprPy imports
from .. import Calculation
from .point_defect_static import calc
from ...calculation_subset import (LammpsPotential, LammpsCommands, Units,
AtommanSystemLoad, AtommanSystemManipulate,
LammpsMinimize, PointDefect)
class PointDefectStatic(Calculation):
"""Class for managing point defect formation calculations"""
############################# Core properties #################################
def __init__(self,
model: Union[str, Path, IOBase, DM, None]=None,
name: Optional[str]=None,
database = None,
params: Union[str, Path, IOBase, dict] = None,
**kwargs: any):
"""
Initializes a Calculation object for a given style.
Parameters
----------
model : str, file-like object or DataModelDict, optional
Record content in data model format to read in. Cannot be given
with params.
name : str, optional
The name to use for saving the record. By default, this should be
the calculation's key.
database : yabadaba.Database, optional
A default Database to associate with the Record, typically the
Database that the Record was obtained from. Can allow for Record
methods to perform Database operations without needing to specify
which Database to use.
params : str, file-like object or dict, optional
Calculation input parameters or input parameter file. Cannot be
given with model.
**kwargs : any
Any other core Calculation record attributes to set. Cannot be
given with model.
"""
# Initialize subsets used by the calculation
self.__potential = LammpsPotential(self)
self.__commands = LammpsCommands(self)
self.__units = Units(self)
self.__system = AtommanSystemLoad(self)
self.__system_mods = AtommanSystemManipulate(self)
self.__minimize = LammpsMinimize(self)
self.__defect = PointDefect(self)
subsets = (self.commands, self.potential, self.system,
self.system_mods, self.minimize, self.defect, self.units)
# Initialize unique calculation attributes
self.__system_base = None
self.__system_defect = None
self.__dumpfile_base = None
self.__dumpfile_defect = None
self.__symbols_base = None
self.__symbols_defect = None
self.__natoms_base = None
self.__natoms_defect = None
self.__potential_energy_base = None
self.__potential_energy_defect = None
self.__potential_energy = None
self.__formation_energy = None
self.__dipole_tensor = None
self.__has_reconfigured = None
self.__centrosummation = None
self.__position_shift = None
self.__db_vect_shift = None
# Define calc shortcut
self.calc = calc
# Call parent constructor
super().__init__(model=model, name=name, database=database, params=params,
subsets=subsets, **kwargs)
@property
def filenames(self) -> list:
"""list: the names of each file used by the calculation."""
return [
'point_defect_static.py',
'min.template'
]
############################## Class attributes ################################
@property
def commands(self) -> LammpsCommands:
"""LammpsCommands subset"""
return self.__commands
@property
def potential(self) -> LammpsPotential:
"""LammpsPotential subset"""
return self.__potential
@property
def units(self) -> Units:
"""Units subset"""
return self.__units
@property
def system(self) -> AtommanSystemLoad:
"""AtommanSystemLoad subset"""
return self.__system
@property
def system_mods(self) -> AtommanSystemManipulate:
"""AtommanSystemManipulate subset"""
return self.__system_mods
@property
def minimize(self) -> LammpsMinimize:
"""LammpsMinimize subset"""
return self.__minimize
@property
def defect(self) -> PointDefect:
"""PointDefect subset"""
return self.__defect
@property
def dumpfile_base(self) -> str:
"""str: Name of the LAMMPS dump file of the base system"""
if self.__dumpfile_base is None:
raise ValueError('No results yet!')
return self.__dumpfile_base
@property
def dumpfile_defect(self) -> str:
"""str: Name of the LAMMPS dump file of the defect system"""
if self.__dumpfile_defect is None:
raise ValueError('No results yet!')
return self.__dumpfile_defect
@property
def symbols_base(self) -> list:
"""list: Model symbols for the base system"""
if self.__symbols_base is None:
raise ValueError('No results yet!')
return self.__symbols_base
@property
def symbols_defect(self) -> list:
"""list: Model symbols for the defect system"""
if self.__symbols_defect is None:
raise ValueError('No results yet!')
return self.__symbols_defect
@property
def natoms_base(self) -> int:
"""int: Number of atoms in the base system"""
if self.__natoms_base is None:
raise ValueError('No results yet!')
return self.__natoms_base
@property
def natoms_defect(self) -> int:
"""int: Number of atoms in the defect system"""
if self.__natoms_defect is None:
raise ValueError('No results yet!')
return self.__natoms_defect
@property
def system_base(self) -> am.System:
"""atomman.System: The base system"""
if self.__system_base is None:
raise ValueError('System not set/loaded!')
return self.__system_base
@property
def system_defect(self) -> am.System:
"""atomman.System: The defect system"""
if self.__system_defect is None:
raise ValueError('System not set/loaded!')
return self.__system_defect
@system_defect.setter
def system_defect(self, val: Optional[am.System]):
if isinstance(val, am.System) or val is None:
self.__system_defect = val
else:
raise ValueError('system_defect must be a System or None')
@property
def potential_energy_base(self) -> float:
"""float: Potential energy of the base system"""
if self.__potential_energy_base is None:
raise ValueError('No results yet!')
return self.__potential_energy_base
@property
def potential_energy_defect(self) -> float:
"""float: Potential energy of the defect system"""
if self.__potential_energy_defect is None:
raise ValueError('No results yet!')
return self.__potential_energy_defect
@property
def potential_energy(self) -> float:
"""float: Potential energy per atom for the base system"""
if self.__potential_energy is None:
raise ValueError('No results yet!')
return self.__potential_energy
@property
def formation_energy(self) -> float:
"""float: Point defect formation energy"""
if self.__formation_energy is None:
raise ValueError('No results yet!')
return self.__formation_energy
@property
def dipole_tensor(self) -> np.ndarray:
"""numpy.NDArray: The elastic dipole tensor for the point defect"""
if self.__dipole_tensor is None:
raise ValueError('No results yet!')
return self.__dipole_tensor
@property
def has_reconfigured(self) -> bool:
"""bool: Flag for if checks indicate the defect has reconfigured"""
if self.__has_reconfigured is None:
raise ValueError('No results yet!')
return self.__has_reconfigured
@property
def centrosummation(self) -> np.ndarray:
"""numpy.NDArray: sum of relative neighbor coordinates after relaxing"""
if self.__centrosummation is None:
raise ValueError('No results yet!')
return self.__centrosummation
@property
def position_shift(self) -> np.ndarray:
"""numpy.NDArray: shift in defect's position after relaxing"""
if self.__position_shift is None:
raise ValueError('Value not set!')
return self.__position_shift
@property
def db_vect_shift(self) -> np.ndarray:
"""numpy.NDArray: Change in dumbbell vector after relaxing"""
if self.__db_vect_shift is None:
raise ValueError('Value not set!')
return self.__db_vect_shift
def load_system_base(self):
"""Load the base system from the database"""
if self.__system_base is None:
fname = self.dumpfile_base
tar = self.database.get_tar(record=self)
f = tar.extractfile(fname)
self.__system_base = am.load('atom_dump', f, symbols=self.symbols_base)
def load_system_defect(self):
"""Load the defect system from the database"""
if self.__system_defect is None:
fname = self.dumpfile_defect
tar = self.database.get_tar(record=self)
f = tar.extractfile(fname)
self.__system_defect = am.load('atom_dump', f, symbols=self.symbols_defect)
def set_values(self,
name: Optional[str] = None,
**kwargs: any):
"""
Set calculation values directly. Any terms not given will be set
or reset to the calculation's default values.
Parameters
----------
name : str, optional
The name to assign to the calculation. By default, this is set as
the calculation's key.
**kwargs : any, optional
Any keyword parameters supported by the set_values() methods of
the parent Calculation class and the subset classes.
"""
# Call super to set universal and subset content
super().set_values(name=name, **kwargs)
# Set calculation-specific values
####################### Parameter file interactions ###########################
def load_parameters(self,
params: Union[dict, str, IOBase],
key: Optional[str] = None):
"""
Reads in and sets calculation parameters.
Parameters
----------
params : dict, str or file-like object
The parameters or parameter file to read in.
key : str, optional
A new key value to assign to the object. If not given, will use
calc_key field in params if it exists, or leave the key value
unchanged.
"""
# Load universal content
input_dict = super().load_parameters(params, key=key)
# Load input/output units
self.units.load_parameters(input_dict)
# Change default values for subset terms
input_dict['sizemults'] = input_dict.get('sizemults', '5 5 5')
input_dict['forcetolerance'] = input_dict.get('forcetolerance',
'1.0e-6 eV/angstrom')
# Load calculation-specific strings
# Load calculation-specific booleans
# Load calculation-specific integers
# Load calculation-specific unitless floats
# Load calculation-specific floats with units
# Load LAMMPS commands
self.commands.load_parameters(input_dict)
# Load minimization parameters
self.minimize.load_parameters(input_dict)
# Load LAMMPS potential
self.potential.load_parameters(input_dict)
# Load initial system
self.system.load_parameters(input_dict)
# Load defect parameters
self.defect.load_parameters(input_dict)
# Manipulate system
self.system_mods.load_parameters(input_dict)
def master_prepare_inputs(self,
branch: str = 'main',
**kwargs: any) -> dict:
"""
        Utility method that builds input parameters for prepare according to the
workflows used by the NIST Interatomic Potentials Repository. In other
words, transforms inputs from master_prepare into inputs for prepare.
Parameters
----------
branch : str, optional
Indicates the workflow branch to prepare calculations for. Default
value is 'main'.
**kwargs : any
Any parameter modifications to make to the standard workflow
prepare scripts.
Returns
-------
params : dict
The full set of prepare parameters based on the workflow branch
"""
# Initialize params and copy over branch
params = {}
params['branch'] = branch
# main branch
if branch == 'main':
# Check for required kwargs
assert 'lammps_command' in kwargs
# Set default workflow settings
params['buildcombos'] = [
'atomicparent load_file parent',
'defect pointdefect_file'
]
params['parent_record'] = 'relaxed_crystal'
params['parent_method'] = 'dynamic'
params['parent_standing'] = 'good'
params['defect_record'] = 'point_defect'
params['sizemults'] = '12 12 12'
params['forcetolerance'] = '1e-8'
# Copy kwargs to params
for key in kwargs:
# Rename potential-related terms for buildcombos
if key[:10] == 'potential_':
params[f'parent_{key}'] = kwargs[key]
# Copy/overwrite other terms
else:
params[key] = kwargs[key]
else:
raise ValueError(f'Unknown branch {branch}')
return params
@property
def singularkeys(self) -> list:
"""list: Calculation keys that can have single values during prepare."""
keys = (
# Universal keys
super().singularkeys
# Subset keys
+ self.commands.keyset
+ self.units.keyset
# Calculation-specific keys
)
return keys
@property
def multikeys(self) -> list:
"""list: Calculation key sets that can have multiple values during prepare."""
keys = (
# Universal multikeys
super().multikeys +
# Combination of potential and system keys
[
self.potential.keyset +
self.system.keyset
] +
# System mods keys
[
self.system_mods.keyset
] +
# Defect keys
[
self.defect.keyset
] +
# Minimization keys
[
self.minimize.keyset
]
)
return keys
########################### Data model interactions ###########################
@property
def modelroot(self) -> str:
"""str: The root element of the content"""
return 'calculation-point-defect-static'
def build_model(self) -> DM:
"""
Generates and returns model content based on the values set to object.
"""
# Build universal content
model = super().build_model()
calc = model[self.modelroot]
# Build subset content
self.commands.build_model(calc, after='atomman-version')
self.potential.build_model(calc, after='calculation')
self.system.build_model(calc, after='potential-LAMMPS')
self.system_mods.build_model(calc)
self.defect.build_model(calc, after='system-info')
self.minimize.build_model(calc)
# Build results
if self.status == 'finished':
calc['defect-free-system'] = DM()
calc['defect-free-system']['artifact'] = DM()
calc['defect-free-system']['artifact']['file'] = self.dumpfile_base
calc['defect-free-system']['artifact']['format'] = 'atom_dump'
calc['defect-free-system']['symbols'] = self.symbols_base
calc['defect-free-system']['potential-energy'] = uc.model(self.potential_energy_base,
self.units.energy_unit)
calc['defect-system'] = DM()
calc['defect-system']['artifact'] = DM()
calc['defect-system']['artifact']['file'] = self.dumpfile_defect
calc['defect-system']['artifact']['format'] = 'atom_dump'
calc['defect-system']['symbols'] = self.symbols_defect
calc['defect-system']['potential-energy'] = uc.model(self.potential_energy_defect,
self.units.energy_unit)
# Save the calculation results
calc['cohesive-energy'] = uc.model(self.potential_energy,
self.units.energy_unit)
calc['number-of-atoms'] = self.natoms_defect
calc['defect-formation-energy'] = uc.model(self.formation_energy,
self.units.energy_unit)
calc['defect-elastic-dipole-tensor'] = uc.model(self.dipole_tensor,
self.units.energy_unit)
# Save the reconfiguration checks
calc['reconfiguration-check'] = r_c = DM()
r_c['has_reconfigured'] = self.has_reconfigured
r_c['centrosummation'] = self.centrosummation.tolist()
if self.__position_shift is not None:
r_c['position_shift'] = self.position_shift.tolist()
if self.__db_vect_shift is not None:
r_c['db_vect_shift'] = self.db_vect_shift.tolist()
self._set_model(model)
return model
def load_model(self,
model: Union[str, DM],
name: Optional[str] = None):
"""
Loads record contents from a given model.
Parameters
----------
model : str or DataModelDict
The model contents of the record to load.
name : str, optional
The name to assign to the record. Often inferred from other
attributes if not given.
"""
# Load universal and subset content
super().load_model(model, name=name)
calc = self.model[self.modelroot]
# Load results
if self.status == 'finished':
self.__dumpfile_base = calc['defect-free-system']['artifact']['file']
self.__symbols_base = calc['defect-free-system']['symbols']
self.__potential_energy_base = uc.value_unit(calc['defect-free-system']['potential-energy'])
self.__natoms_base = None
self.__dumpfile_defect= calc['defect-system']['artifact']['file']
self.__symbols_defect = calc['defect-system']['symbols']
self.__potential_energy_defect = uc.value_unit(calc['defect-system']['potential-energy'])
self.__natoms_defect = calc['number-of-atoms']
self.__potential_energy = uc.value_unit(calc['cohesive-energy'])
self.__formation_energy = uc.value_unit(calc['defect-formation-energy'])
self.__dipole_tensor = uc.value_unit(calc['defect-elastic-dipole-tensor'])
# Save the reconfiguration checks
r_c = calc['reconfiguration-check']
self.__has_reconfigured = r_c['has_reconfigured']
self.__centrosummation = np.array(r_c['centrosummation'])
if 'position_shift' in r_c:
self.__position_shift = np.array(r_c['position_shift'])
else:
self.__position_shift = None
if 'db_vect_shift' in r_c:
self.__db_vect_shift = np.array(r_c['db_vect_shift'])
else:
self.__db_vect_shift = None
########################## Metadata interactions ##############################
def metadata(self) -> dict:
"""
Generates a dict of simple metadata values associated with the record.
Useful for quickly comparing records and for building pandas.DataFrames
for multiple records of the same style.
"""
# Call super to extract universal and subset content
meta = super().metadata()
# Extract calculation-specific content
# Extract results
if self.status == 'finished':
meta['dumpfile_base'] = self.dumpfile_base
meta['dumpfile_defect'] = self.dumpfile_defect
meta['E_pot_base'] = self.potential_energy_base
meta['E_pot_defect'] = self.potential_energy_defect
meta['E_pot'] = self.potential_energy
meta['E_ptd_f'] = self.formation_energy
meta['pij'] = self.dipole_tensor
meta['has_reconfigured'] = self.has_reconfigured
meta['centrosummation'] = self.centrosummation
try:
meta['position_shift'] = self.position_shift
except ValueError:
pass
try:
meta['db_vect_shift'] = self.db_vect_shift
except ValueError:
pass
return meta
@property
def compare_terms(self) -> list:
"""list: The terms to compare metadata values absolutely."""
return [
'script',
'load_file',
'load_options',
'symbols',
'potential_LAMMPS_key',
'potential_key',
'a_mult',
'b_mult',
'c_mult',
'pointdefect_key',
]
@property
def compare_fterms(self) -> dict:
"""dict: The terms to compare metadata values using a tolerance."""
return {}
def isvalid(self) -> bool:
return self.system.family == self.defect.family
########################### Calculation interactions ##########################
def calc_inputs(self) -> dict:
"""Builds calculation inputs from the class's attributes"""
# Initialize input_dict
input_dict = {}
# Add subset inputs
for subset in self.subsets:
subset.calc_inputs(input_dict)
# Modify inputs for calculation
input_dict['cutoff'] = 1.05 * input_dict['ucell'].box.a
del input_dict['ucell']
del input_dict['transform']
# Return input_dict
return input_dict
def process_results(self, results_dict: dict):
"""
Processes calculation results and saves them to the object's results
attributes.
Parameters
----------
results_dict: dict
The dictionary returned by the calc() method.
"""
self.__dumpfile_base = results_dict['dumpfile_base']
self.__dumpfile_defect = results_dict['dumpfile_ptd']
self.__system_base = results_dict['system_base']
self.__system_defect = results_dict['system_ptd']
self.__symbols_base = results_dict['system_base'].symbols
self.__symbols_defect = results_dict['system_ptd'].symbols
self.__natoms_base = results_dict['system_base'].natoms
self.__natoms_defect = results_dict['system_ptd'].natoms
self.__potential_energy_base = results_dict['E_total_base']
self.__potential_energy_defect = results_dict['E_total_ptd']
self.__potential_energy = results_dict['E_pot']
self.__formation_energy = results_dict['E_ptd_f']
self.__dipole_tensor = results_dict['pij_tensor']
self.__has_reconfigured = results_dict['has_reconfigured']
self.__centrosummation = results_dict['centrosummation']
self.__position_shift = results_dict.get('position_shift', None)
self.__db_vect_shift = results_dict.get('db_vect_shift', None)
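# Minimal usage sketch (assumptions: an input parameter file named
# 'calc_point_defect_static.in' exists and a working LAMMPS build is
# available; the exact driver workflow may differ):
#
#     calculation = PointDefectStatic(params='calc_point_defect_static.in')
#     results = calculation.calc(**calculation.calc_inputs())
#     calculation.process_results(results)
#     print(calculation.formation_energy)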
|
import pandas as pd
from sklearn import preprocessing
from preprocessing import read, split, non_numerical_features, one_hot_encoding
from preprocessing import drop_features, deal_with_23, deal_with_58
from postprocessing import writeoutput
from csv import DictReader, DictWriter
from sklearn.feature_selection import VarianceThreshold
from sklearn.externals import joblib
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.naive_bayes import GaussianNB
import time
from sklearn.ensemble import RandomForestClassifier
from csv import DictReader, DictWriter
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn import svm
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
start = time.time()
#data = read('data_test.csv')
#quiz = read('quiz_test.csv')
data = read('data.csv')
label = data['label']
data = data.drop('label', axis = 1)
print(data)
quiz = read('quiz.csv')
#data = deal_with_23(data)
#quiz = deal_with_23(quiz)
#data = deal_with_58(data)
#quiz = deal_with_58(quiz)
print(data.shape)
print(quiz.shape)
data = data.drop('23', axis = 1)
quiz = quiz.drop('23', axis = 1)
data = data.drop('58', axis = 1)
quiz = quiz.drop('58', axis = 1)
categories = non_numerical_features(data)
print(categories)
data, quiz = one_hot_encoding(data, quiz,categories)
data = drop_features(data, categories)
quiz = drop_features(quiz, categories)
print(data.shape)
print(quiz.shape)
train_data = preprocessing.normalize(data)
test_data = preprocessing.normalize(quiz)
print("Entering the learing phase")
'''
print("-------------------------------------")
print("Adaboost Classifier 1-100 ")
model1 = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), algorithm="SAMME", n_estimators=100)
train_data = data.values
test_data = quiz.values
model1 = model1.fit(train_data,label.values.T)
output = model1.predict(train_data)
correct = 0
for i in range(0,len(output)):
if output[i] == label[i]:
correct = correct + 1
print("Correct: ")
print(correct)
outputA = model1.predict(test_data)
print("-------------------------------------")
print("Adaboost Classifier 1-200 ")
model2 = AdaBoostClassifier(DecisionTreeClassifier(max_depth=10), algorithm="SAMME", n_estimators=500)
train_data = data.values
test_data = quiz.values
model2 = model2.fit(train_data,label.values.T)
output = model2.predict(train_data)
correct = 0
for i in range(0,len(output)):
if output[i] == label[i]:
correct = correct + 1
print("Correct: ")
print(correct)
output1 = model2.predict(test_data)
writeoutput('output1.csv',output1)
outputA = output1
print("-------------------------------------")
print("Random Forest Classifier 300 ")
model3 = RandomForestClassifier(n_estimators = 500)
model3 = model3.fit(train_data,label.values.T)
output = model3.predict(train_data)
correct = 0
for i in range(0,len(output)):
if output[i] == label[i]:
correct = correct + 1
print("Correct: ")
print(correct)
output2 = model3.predict(test_data)
writeoutput('output2.csv',output2)
print("-------------------------------------")
print("Logical Regression ")
model4 = LogisticRegression()
model4 = model4.fit(train_data,label.values.T)
output = model4.predict(train_data)
correct = 0
for i in range(0,len(output)):
if output[i] == label[i]:
correct = correct + 1
print("Correct: ")
print(correct)
output3 = model4.predict(test_data)
writeoutput('output3.csv',output3)
'''
print("-------------------------------------")
print("Neural Network")
model5 = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(5, 2), random_state=1)
model5 = model5.fit(train_data,label.values.T)
output = model5.predict(train_data)
correct = 0
for i in range(0,len(output)):
if output[i] == label[i]:
correct = correct + 1
print("Correct: ")
print(correct)
done = time.time()
elapsed = done - start
print(elapsed)
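# To also score the quiz set and write a submission file (mirroring the
# commented-out blocks above; 'output5.csv' is an arbitrary file name):
#   output5 = model5.predict(test_data)
#   writeoutput('output5.csv', output5)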
|
# 03. pyqt_paint_event.py
# PyQt Paint Event
import sys
from PyQt5.QtGui import QPainter, QPen, QBrush, QColor
from PyQt5.QtCore import QDate, Qt
from PyQt5.QtWidgets import QApplication, QWidget, QPushButton
class MyApp(QWidget):
def __init__(self):
super().__init__()
self.setFixedSize(200, 300)
self.setWindowTitle('GA Mario')
        self.button = QPushButton('', self)
        self.show()
    # Called every time the window is repainted
    def paintEvent(self, event):
        # Painting tool
        painter = QPainter()
        # Begin painting
        painter.begin(self)
        # Set the pen (outline)
        painter.setPen(QPen(Qt.red, 2.0, Qt.SolidLine))
        # Draw a line
        painter.drawLine(25, 175, 75, 275)
        # Set the pen (outline)
        painter.setPen(QPen(Qt.blue, 2.0, Qt.SolidLine))
        # Draw a line
        painter.drawLine(75, 201, 75, 275)
        # Set the pen (outline)
        painter.setPen(QPen(Qt.red, 2.0, Qt.SolidLine))
        # Draw a line
        painter.drawLine(125, 175, 75, 275)
        # Set the pen from an RGB color
        painter.setPen(QPen(QColor.fromRgb(0, 0, 0), 1.0, Qt.SolidLine))
        # Set the brush (fill)
        painter.setBrush(QBrush(Qt.blue))
        # Rectangle
        painter.drawRect(0, 0, 50, 50)
        # Set the pen from an RGB color
        painter.setPen(QPen(QColor.fromRgb(0, 0, 0), 1.0, Qt.SolidLine))
        # Set the brush (no fill)
        painter.setBrush(QBrush(Qt.NoBrush))
        # Rectangle
        painter.drawRect(50, 0, 50, 50)
        # Set the pen from an RGB color
        painter.setPen(QPen(QColor.fromRgb(0, 0, 0), 1.0, Qt.SolidLine))
        # Set the brush (no fill)
        painter.setBrush(QBrush(Qt.NoBrush))
        # Rectangle
        painter.drawRect(0, 50, 50, 50)
        # Set the pen from an RGB color
        painter.setPen(QPen(QColor.fromRgb(0, 0, 0), 1.0, Qt.SolidLine))
        # Set the brush (fill)
        painter.setBrush(QBrush(Qt.red))
        # Rectangle
        painter.drawRect(50, 50, 50, 50)
painter.setPen(QPen(QColor.fromRgb(0, 0, 0), 1.0, Qt.SolidLine))
        # Set the brush from an RGB color
        painter.setBrush(QBrush(QColor.fromRgb(64, 244, 208)))
        # Draw an ellipse
        painter.drawEllipse(0, 150, 50, 50)
        painter.setPen(QPen(QColor.fromRgb(0, 0, 0), 1.0, Qt.SolidLine))
        # Set the brush from an RGB color
        painter.setBrush(QBrush(QColor.fromRgb(64, 244, 208)))
        # Draw an ellipse
        painter.drawEllipse(100, 150, 50, 50)
        painter.setPen(QPen(QColor.fromRgb(0, 0, 0), 1.0, Qt.SolidLine))
        # Set the brush (no fill)
        painter.setBrush(QBrush(Qt.NoBrush))
        # Draw an ellipse
        painter.drawEllipse(50, 150, 50, 50)
        painter.setPen(QPen(QColor.fromRgb(0, 0, 0), 1.0, Qt.SolidLine))
        # Set the brush from an RGB color
        painter.setBrush(QBrush(QColor.fromRgb(178, 178, 178)))
        # Draw an ellipse
painter.drawEllipse(50, 250, 50, 50)
painter.setPen(QPen(Qt.cyan, 1.0, Qt.SolidLine))
painter.setBrush(Qt.NoBrush)
now = QDate.currentDate()
self.button.setText(now.toString('d.M.yy'))
self.button.setGeometry(100, 250, 100, 50)
painter.end()
self.show()
if __name__ == '__main__':
app = QApplication(sys.argv)
window = MyApp()
sys.exit(app.exec_())
|
# Flip Flopper
'''
Flip = input("Input Something To Be Flipped: ")
Flip2 = Flip
print(Flip2)
'''
flip = input("Input Something To Be Flipped: ")
def change(input_str):
return input_str[-1] + input_str[1:-1] + input_str[0]
print(change(flip))
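# For example, change("Flipped") swaps the first and last characters, giving "dlippeF".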
|
import os
import PyPDF2
## create path, reader, and writer objects
path = "sample.pdf"
pdf = PyPDF2.PdfFileReader(path)
writer = PyPDF2.PdfFileWriter()
## loop to add all pages in PDF to writer object
for x in range(pdf.numPages):
page = pdf.getPage(x)
writer.addPage(page)
## encrypt method, set user password, owner password, and use 128 bit vs 40 bit encryption
writer.encrypt(user_pwd = 'password', owner_pwd = None, use_128bit = True)
output = open("encrypted.pdf", 'wb')
writer.write(output)
print ("Done")
output.close()
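# To verify the result, the encrypted file can be reopened and unlocked with
# the same user password (sketch, assuming the PyPDF2 1.x API used above):
#   reader = PyPDF2.PdfFileReader("encrypted.pdf")
#   if reader.isEncrypted:
#       reader.decrypt('password')
#   print(reader.numPages)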
|
#!/usr/bin/env python3
# pylint: disable=maybe-no-member
#
# This file is part of LUNA.
#
# Copyright (c) 2020 Great Scott Gadgets <info@greatscottgadgets.com>
# SPDX-License-Identifier: BSD-3-Clause
import sys
import time
from prompt_toolkit import HTML
from prompt_toolkit import print_formatted_text as pprint
from amaranth import Signal, Elaboratable, Module, Cat, ClockDomain, ClockSignal, ResetInserter
from amaranth.lib.cdc import FFSynchronizer
from luna import top_level_cli
from apollo_fpga import ApolloDebugger, ApolloILAFrontend
from luna.gateware.debug.ila import SyncSerialILA
from luna.gateware.utils.cdc import synchronize
from luna.gateware.architecture.car import LunaECP5DomainGenerator
from luna.gateware.interface.spi import SPIRegisterInterface, SPIMultiplexer, SPIBus
from luna.gateware.interface.ulpi import UTMITranslator
from luna.gateware.usb.analyzer import USBAnalyzer
DATA_AVAILABLE = 1
ANALYZER_RESULT = 2
class ULPIDiagnostic(Elaboratable):
""" Gateware that evalutes ULPI PHY functionality. """
def elaborate(self, platform):
m = Module()
# Generate our clock domains.
clocking = LunaECP5DomainGenerator()
m.submodules.clocking = clocking
# Grab a reference to our debug-SPI bus.
board_spi = synchronize(m, platform.request("debug_spi").i)
# Create our SPI-connected registers.
m.submodules.spi_registers = spi_registers = SPIRegisterInterface(7, 8)
m.d.comb += spi_registers.spi.connect(board_spi)
# Create our UTMI translator.
ulpi = platform.request(platform.default_usb_connection)
m.submodules.utmi = utmi = UTMITranslator(ulpi=ulpi)
# Strap our power controls to be in VBUS passthrough by default,
# on the target port.
m.d.comb += [
platform.request("power_a_port").o .eq(0),
platform.request("pass_through_vbus").o .eq(1),
]
# Hook up our LEDs to status signals.
m.d.comb += [
platform.request("led", 2).o .eq(utmi.session_valid),
platform.request("led", 3).o .eq(utmi.rx_active),
platform.request("led", 4).o .eq(utmi.rx_error)
]
# Set up our parameters.
m.d.comb += [
# Set our mode to non-driving and full speed.
utmi.op_mode .eq(0b01),
utmi.xcvr_select .eq(0b01),
# Disable the DP/DM pull resistors.
            utmi.dp_pulldown .eq(0),
utmi.dm_pulldown .eq(0),
utmi.term_select .eq(0)
]
read_strobe = Signal()
# Create a USB analyzer, and connect a register up to its output.
m.submodules.analyzer = analyzer = USBAnalyzer(utmi_interface=utmi)
# Provide registers that indicate when there's data ready, and what the result is.
spi_registers.add_read_only_register(DATA_AVAILABLE, read=analyzer.data_available)
spi_registers.add_read_only_register(ANALYZER_RESULT, read=analyzer.data_out, read_strobe=read_strobe)
m.d.comb += [
platform.request("led", 0).o .eq(analyzer.capturing),
platform.request("led", 1).o .eq(analyzer.data_available),
platform.request("led", 5).o .eq(analyzer.overrun),
analyzer.next .eq(read_strobe)
]
# Debug output.
m.d.comb += [
platform.request("user_io", 0, dir="o") .eq(ClockSignal("usb")),
platform.request("user_io", 1, dir="o") .eq(ulpi.dir),
platform.request("user_io", 2, dir="o") .eq(ulpi.nxt),
platform.request("user_io", 3, dir="o") .eq(analyzer.sampling),
]
# Return our elaborated module.
return m
if __name__ == "__main__":
analyzer = top_level_cli(ULPIDiagnostic)
debugger = ApolloDebugger()
time.sleep(1)
def data_is_available():
return debugger.spi.register_read(DATA_AVAILABLE)
def read_byte():
return debugger.spi.register_read(ANALYZER_RESULT)
def get_next_byte():
while not data_is_available():
time.sleep(0.1)
return read_byte()
# Tiny stateful parser for our analyzer.
while True:
# Grab our header, and process it.
size = (get_next_byte() << 16) | get_next_byte()
# Then read and print out our body
packet = [get_next_byte() for _ in range(size)]
packet_hex = [f"{byte:02x}" for byte in packet]
packet_as_string = bytes(packet)
print(f"{packet_as_string}: {packet_hex}")
#byte = get_next_byte()
#print(f"{byte:02x} ", end="")
#sys.stdout.flush()
|
import pytest
@pytest.fixture
def testdir(request):
    testdir = request.getfixturevalue('testdir')
testdir.makeini('[pytest]\ncodechecks = pyflakes')
return testdir
def test_pyflakes_finds_name_error(testdir):
testdir.makepyfile('''
def tesdt_a():
pass
def b():
abc
''')
out = testdir.runpytest('--tb=short', '-v')
out.stdout.fnmatch_lines([
'*abc*',
'*1 failed*',
])
def test_reportinfo_verbose(testdir):
testdir.makepyfile('''
def xyz():
pass
''')
out = testdir.runpytest('-v')
out.stdout.fnmatch_lines([
'test_reportinfo_verbose.py::pyflakes PASSED*'
])
|
#!/usr/bin/env python
# encoding: utf-8
from bs4 import BeautifulSoup, Comment
import urllib
import re
# HTML tag whitelist
VALID_TAGS = {
'strong': [],
'em': [],
'span': {'style', },
'p': [],
'h1': [],
'pre': [],
'h2': [],
'h3': [],
'br': [],
'a': {'href', 'title'},
    'img': {'src', 'style'},  # externally linked images may be cached
    'embed': {'type', 'class', 'src', 'width', 'height', 'allowfullscreen', 'allowscriptaccess',
              'loop', 'menu', 'play', 'src', 'style', 'wmode'}  # handled specially
}
def get_url_host(url):
""" url中获取域名
"""
pro, rest = urllib.splittype(url)
if not rest:
return None
host, rest = urllib.splithost(rest)
return host
def __valid_attr(tag, attrs):
re_attrs = dict()
if tag == 'span':
valid = {'background-color', 'line-height', 'color', 'font-size'}
attr = attrs.get('style')
if attr:
values = re.findall(r'([\w-]+):', attr)
if set(values).issubset(valid):
re_attrs['style'] = attr
elif tag == 'embed':
        #
        # Bilibili: http://share.acg.tv/flash.swf?aid=406209&page=1
        # AcFun:    http://static.acfun.mm111.net/player/ACFlashPlayer.out.swf?type=page&url=http://www.acfun.tv/v/ac1509412
        # Tucao:    http://www.tucao.cc/mini/4040389.swf
        # Tudou:
        #
default_attr_settings = {
'allowfullscreen': 'true',
'allowscriptaccess': 'never',
'class': ['edui-faked-video'],
'loop': 'false',
'menu': 'false',
'play': 'true',
'style': 'float:none',
'pluginspage': 'http://www.macromedia.com/go/getflashplayer',
'type': 'application/x-shockwave-flash',
'wmode': 'transparent'
}
        # TODO: support iQiyi, Sohu, Youku, Tudou, LeTV, etc.
allow_src_host = {'share.acg.tv', 'static.acfun.mm111.net', 'www.tucao.cc'}
        # Check the playback source URL; only allow sources from whitelisted sites
src_value = attrs.get('src')
if src_value:
host = get_url_host(src_value)
            if host in allow_src_host:  # TODO: add a hint when the host is not allowed
re_attrs['src'] = src_value
re_attrs.update(default_attr_settings)
else:
valid_attrs = VALID_TAGS.get(tag)
for at in valid_attrs:
v = attrs.get(at)
if v:
re_attrs[at] = v
if tag == 'a':
re_attrs['target'] = '_blank'
return re_attrs
def sanitize_html(value, valid_tags=VALID_TAGS):
""" HTML 富文本过滤
参考: https://stackoverflow.com/questions/699468/python-html-sanitizer-scrubber-filter
"""
soup = BeautifulSoup(value)
comments = soup.findAll(text=lambda text: isinstance(text, Comment))
[comment.extract() for comment in comments]
out = soup.renderContents()
    # Repeat until the output stops changing; nested invalid tags may need several passes.
    while 1:
        previous = out
        soup = BeautifulSoup(out)
        for tag in soup.findAll(True):
            if tag.name not in valid_tags:
                tag.hidden = True
            else:
                # attrs is a dict
                tag.attrs = __valid_attr(tag.name, tag.attrs)
        out = soup.renderContents()
        if out == previous:
            break
return out
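# Illustrative usage example (not from the original module). It relies on the old
# BeautifulSoup API (findAll / renderContents) used above; the sample markup is made up.
if __name__ == '__main__':
    dirty = '<p onclick="evil()">hello <script>alert(1)</script><em>world</em></p>'
    # The <script> tag is hidden (its text survives) and the onclick attribute is dropped.
    print(sanitize_html(dirty))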
|
#!/usr/bin/python2.7
#-*- coding: utf-8 -*-
import numpy as np
def kalman_filter(data,Q,R,x0,P0):
N = len(data)
K = np.zeros(N)
X = np.zeros(N)
P = np.zeros(N)
X[0] = x0
P[0] = P0
K_i = 0
P_i_1 = P0
X_i_1 = x0
    for i in range(1, N):
        # range(1, N) does not include N
        K_i = P_i_1 / (P_i_1 + R)              # Kalman gain
        X_i = X_i_1 + K_i * (data[i] - X_i_1)  # state update
        P_i = P_i_1 - K_i * P_i_1 + Q          # covariance update plus process noise
        # print (X_i)
        X[i] = X_i
        K[i] = K_i
        P[i] = P_i
        P_i_1 = P_i
        X_i_1 = X_i
return X
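# Illustrative usage example (not from the original script): filter a noisy constant
# signal. The Q, R, x0 and P0 values below are illustrative assumptions.
if __name__ == '__main__':
    np.random.seed(0)
    true_value = 5.0
    noisy = true_value + np.random.normal(0.0, 1.0, 200)
    filtered = kalman_filter(noisy, Q=1e-5, R=1.0, x0=noisy[0], P0=1.0)
    print('raw mean               = %.3f' % noisy.mean())
    print('last filtered estimate = %.3f' % filtered[-1])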
|
from Dictionaries import supported_commands, materials_dict, directions_dict
from word2number import w2n
class GameCommand:
def __init__(self):
self.is_valid = False
self.command = None
self.command_token = None
self.command_text = ''
self.args = {}
self.arg_methods = {
'build': self.get_build_args, # mmbuild
'place': self.get_place_args,
'move': self.get_move_args,
'track': self.get_track_args, # eyebuild/eyetrack
'turn': self.get_look_args,
'tilt': self.get_look_args,
'store': self.get_storage_args, # mstore
'clone': self.get_storage_args, # mclone,
'give': self.get_give_args
}
def get_game_command_args(self):
comm = self.command
if comm and comm in supported_commands:
if (comm != 'undo' and comm != 'redo'):
self.arg_methods[comm]()
else: self.is_valid = True
def get_build_args(self):
build_shapes = ['wall', 'roof', 'house', 'sphere']
build_shape_selected = False
# Parse for build_shape and hollow tags
for word_token in self.command_token:
word = word_token.text
if not build_shape_selected and word in build_shapes:
build_shape_selected = True
self.args[word] = True
elif word == 'hollow':
self.args['hollow'] = True
# Validate configurations of build_shape and dimensions
dimensions = self.get_dimensions()
if ('wall' in self.args.keys() or 'roof' in self.args.keys()) and len(dimensions) == 2:
dimensions.append(0)
elif len(dimensions) < 3 and not ('sphere' in self.args.keys() and len(dimensions) == 1):
return
# Set dimensions and material
self.args['dimensions'] = dimensions
self.set_material()
# Add build flag if this is a track command
if self.command == 'track': self.args['build'] = True
self.is_valid = True
def get_place_args(self):
self.set_material()
self.is_valid = True
def get_move_args(self):
# Get movement dimensions
dimensions = self.get_dimensions()
if not len(dimensions): return
self.args['dimensions'] = dimensions[0]
# Set movement directions, if direction was not defined, default to 'forward'
self.set_direction()
if 'direction' not in self.args.keys(): self.args['direction'] = 'forward'
self.is_valid = True
def get_track_args(self):
# Search for build or move keywords
for word_token in self.command_token:
if word_token.text == 'build':
self.get_build_args()
elif word_token.text == 'move':
self.args['move'] = True
self.is_valid = True
def get_look_args(self):
# Set the turn/tilt dimension
default_degrees = 90 if self.command == 'turn' else 45
dimensions = self.get_dimensions()
self.args['dimensions'] = dimensions[0] if len(dimensions) else default_degrees
# Set turn/tilt direction, then check if direction was defined
self.set_direction()
if 'direction' in self.args.keys(): self.is_valid = True
def get_storage_args(self):
# use regex to interpret these commands
if self.command == 'clone' and len(self.command_token) > 1:
self.args['name'] = self.command_token[1].text
self.is_valid = True
# somehow extract a name out of this instead of defaulting to the last word
elif self.command == 'store' and len(self.command_token) > 1:
self.args['name'] = self.command_token[-1].text
self.is_valid = True
def get_give_args(self):
# Give the user
if len(self.command_token) > 1:
self.set_material()
dimensions = self.get_dimensions()
self.args['dimensions'] = dimensions[0] if len(dimensions) else 1
self.is_valid = True
def set_material(self):
# Set the first material word found in the token as the block_code argument
for word_token in self.command_token:
if word_token.text in materials_dict.keys():
self.args['block_code'] = materials_dict[word_token.text]
break
# Default to stone if no material word was found
if 'block_code' not in self.args.keys() or self.args['block_code'] is None:
self.args['block_code'] = materials_dict['stone']
def set_direction(self):
# Set the first direction word found in the token as the direction argument
for word_token in self.command_token:
if word_token.text in directions_dict:
self.args['direction'] = word_token.text
break
def get_dimensions(self):
"""
Parse the current command token and return an array of dimensions (ints).
Parameters
----------
None
Returns
-------
dimensions : list
List of parsed numbers (ints) from the tokenized command in the order
that they appear.
"""
defining_number = False
current_number = ''
dimensions = []
for word_token in self.command_token:
if word_token.pos_ == 'NUM':
defining_number = True
current_number += word_token.text + ' '
else:
if defining_number: dimensions.append(w2n.word_to_num(current_number.strip()))
defining_number = False
current_number = ''
if defining_number:
dimensions.append(w2n.word_to_num(current_number.strip()))
return dimensions
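# Illustrative sketch (not from the original module) of get_dimensions(), assuming
# spaCy-like tokens that expose .text and .pos_, that word2number is installed, and
# that the Dictionaries import above resolves. FakeToken exists only for this sketch.
if __name__ == '__main__':
    class FakeToken:
        def __init__(self, text, pos):
            self.text = text
            self.pos_ = pos
    cmd = GameCommand()
    cmd.command = 'move'
    cmd.command_token = [FakeToken('move', 'VERB'),
                         FakeToken('twenty', 'NUM'),
                         FakeToken('five', 'NUM'),
                         FakeToken('blocks', 'NOUN')]
    # Consecutive NUM tokens ("twenty five") are joined before word_to_num() is applied.
    print(cmd.get_dimensions())  # expected: [25]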
|
import netCDF4 as cdf
import numpy as np
import os
import re
"""
folder paths should end with a forward slash
src: root path of product
dst: destination of product
bounds: bounds of the subset given as [latmin, latmax, longmin, longmax]
"""
def subSetFolder(src, dst, bounds):
FILE_COORDS = "geo_coordinates.nc"
#map the bounds in lat/long onto indexes
geoFile = cdf.Dataset(src+FILE_COORDS)
lats = geoFile.variables['latitude'][:]
longs = geoFile.variables['longitude'][:]
mask = np.logical_and(np.logical_and(lats >= bounds[0], lats <= bounds[1]), np.logical_and(longs >= bounds[2], longs <= bounds[3]))
#subset all files in folder
for subdir, dirs, files in os.walk(src):
for f in files:
            if re.match(r'[a-zA-Z0-9]{3}[0-9]_[a-z0-9]*\.nc', f) or re.match('geo_coordinates.nc', f):
print(f)
subSetCDF(src+f, dst+f, mask)
"""
creates a subset of the provided netCDF4 file,
that only contains the values with indexes inside the given bounds.
bounds: dictionary specifiying the bounds for each dimensions
key: dimension dimName
value: array [latmin, latmax, longmin, longmax] bound
"""
def subSetCDF(src, dst, mask):
    orig = cdf.Dataset(src)
    copy = cdf.Dataset(dst, "w")
for attr in orig.ncattrs():
copy.setncattr(attr, orig.getncattr(attr))
copyVars = {}
for var in orig.variables:
copyVars[var] = np.extract(mask[:], orig.variables[var][:])
for var in copyVars:
copy.createDimension('dim'+var, copyVars[var].size)
v = copy.createVariable(var, orig.variables[var].datatype, 'dim'+var)
for attr in orig.variables[var].ncattrs():
v.setncattr(attr, orig.variables[var].getncattr(attr))
v[:] = copyVars[var][:]
def testSubSetCDF():
latmin = 39.0
latmax = 43.0
longmin = -21.0
longmax = -19.0
src = "/home/wegner/Documents/testData/testIn/"
dst = "/home/wegner/Documents/testData/testOut/"
subSetFolder(src, dst, [latmin, latmax, longmin, longmax])
testSubSetCDF()
|
import wx
import re
import os
import collections
import prefs
class World(dict):
_defaults = {
'port' : '7777',
'auto_login' : False,
'login_script' : 'connect %u %p',
}
def __init__(self, data):
data = data or World._defaults
for f in data:
if data.get(f) is not None: self[f] = data.get(f)
def save(self):
global _config
worldname = re.sub(r'\W', '_', self.get('name'))
_config.DeleteGroup(worldname)
_config.SetPath(worldname)
for f in self:
if self.get(f): _config.Write(f, str(self.get(f)))
_config.SetPath('/')
_config.Flush()
mainwindow = wx.GetApp().GetTopWindow()
if mainwindow:
mainwindow.rebuildShortlist()
worlds = collections.OrderedDict({})
_defaults = {}
_config = None
worlds_file = os.path.join(prefs.prefs_dir(), 'worlds')
def Initialize():
global _config, worlds, _defaults
_config = wx.FileConfig(localFilename = worlds_file)
# loop worlds...
g_more, worldname, g_index = _config.GetFirstGroup()
if g_more: # do we have anything at all from the config file?
while g_more: # yes, loop and fill stuff out.
_config.SetPath(worldname)
worlddata = {}
# loop data lines inside each world....
e_more, dataname, e_index = _config.GetFirstEntry()
while e_more:
worlddata[dataname] = _config.Read(dataname)
# ew boolean handling. Maybe go thru prefs to do this in one place
if worlddata[dataname] == "True" : worlddata[dataname] = True
if worlddata[dataname] == "False": worlddata[dataname] = False
e_more, dataname, e_index = _config.GetNextEntry(e_index)
# build the World object
worlds[worlddata['name']] = World(worlddata)
# carry on, back to the top for the next world
_config.SetPath('/')
g_more, worldname, g_index = _config.GetNextGroup(g_index)
else: # nothing from config file, grab the initial_worlds data
import json
path = wx.GetApp().path
initial_worlds = json.load(open(os.path.join(path, 'initial_worlds.json'),'r'))
for world_data in initial_worlds:
world = World(world_data)
world.save()
worlds[world.get('name')] = world
|
#!/usr/local/bin/python
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from matplotlib import cm
import seaborn as sns
import numpy as np
import pandas as pd
sp = "100"
lp = "100"
fname = "soft_pf0.2_sp" + sp + "_lp" + lp + "_condensed.density"
df = pd.read_csv(fname, delim_whitespace=True, header=None)
fig, ax = plt.subplots(1, 2, figsize=(8, 3))
# sns.heatmap(df,cmap=cm.viridis,ax=ax[0])
data = df.replace(0, 1e-10)
data = data / data.sum().sum()
min_data = data.min().min()
if min_data == 0:
min_data = 1
max_data = data.max().max()
log_norm = LogNorm(vmin=min_data, vmax=max_data)
cbar_ticks = [
10 ** i
for i in range(
int(np.floor(np.log10(min_data))), 1 + int(np.ceil(np.log10(max_data)))
)
]
sns.heatmap(
data, norm=log_norm, cmap=cm.viridis, ax=ax[0], cbar_kws={"ticks": cbar_ticks}
)
fft_data = np.fft.fftshift(np.fft.fft2(df))
data = np.abs(fft_data)
# data=data/data.sum().sum()
min_data = data.min().min()
if min_data == 0:
min_data = 1
max_data = data.max().max()
log_norm = LogNorm(vmin=min_data, vmax=max_data)
cbar_ticks = [
10 ** i
for i in range(
int(np.floor(np.log10(min_data))), 1 + int(np.ceil(np.log10(max_data)))
)
]
sns.heatmap(
data, norm=log_norm, cmap=cm.viridis, ax=ax[1], cbar_kws={"ticks": cbar_ticks}
)
savename = "sp" + sp + "_lp" + lp
fig.savefig(savename + ".png", dpi=300)
f = open(savename + "_fft_max.txt", "w")
f.write(str(np.max(data[data.shape[0] // 2])))
f.close()
|
from datetime import datetime as dt
import re
import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
# Class needed for the pipeline to work
class ColumnSelector(BaseEstimator, TransformerMixin):
def __init__(self, columns):
self.columns = columns
def fit(self, X, y=None):
return self
def transform(self, X):
assert isinstance(X, pd.DataFrame)
try:
return X[self.columns]
except KeyError:
cols_error = list(set(self.columns) - set(X.columns))
raise KeyError("The DataFrame does not include the columns: %s" % cols_error)
# Some utility functions
def preprocess(df):
# Fill N/A values
df.season_holidayed_code.fillna(-1, inplace = True)
df.state_code_residence.fillna(-1, inplace = True)
# Parse dates
for col in ['checkout_date', 'checkin_date', 'booking_date']:
        df[col] = df[col].apply(lambda x: dt.strptime(x, '%d/%m/%y'))
# Add datetime features
df['booking_in_advance'] = (df['checkin_date'] - df['booking_date']).dt.days
df['days_stayed'] = (df['checkout_date'] - df['checkin_date']).dt.days
# Apply add_datepart function to datetime features
for col in ['checkout_date', 'checkin_date', 'booking_date']:
# add date features
add_datepart(df, col, drop=True)
# Add other features
df['n_people'] = df['numberofadults'] + df['numberofchildren']
return df
def add_datepart(df, fldname, drop=True, time=False, errors="raise"):
"""add_datepart converts a column of df from a datetime64 to many columns containing
the information from the date. This applies changes inplace.
Parameters:
-----------
df: A pandas data frame. df gain several new columns.
fldname: A string that is the name of the date column you wish to expand.
If it is not a datetime64 series, it will be converted to one with pd.to_datetime.
drop: If true then the original date column will be removed.
time: If true time features: Hour, Minute, Second will be added.
Examples:
---------
>>> df = pd.DataFrame({ 'A' : pd.to_datetime(['3/11/2000', '3/12/2000', '3/13/2000'], infer_datetime_format=False) })
>>> df
A
0 2000-03-11
1 2000-03-12
2 2000-03-13
>>> add_datepart(df, 'A')
>>> df
AYear AMonth AWeek ADay ADayofweek ADayofyear AIs_month_end AIs_month_start AIs_quarter_end AIs_quarter_start AIs_year_end AIs_year_start AElapsed
0 2000 3 10 11 5 71 False False False False False False 952732800
1 2000 3 10 12 6 72 False False False False False False 952819200
2 2000 3 11 13 0 73 False False False False False False 952905600
"""
fld = df[fldname]
fld_dtype = fld.dtype
if isinstance(fld_dtype, pd.core.dtypes.dtypes.DatetimeTZDtype):
fld_dtype = np.datetime64
if not np.issubdtype(fld_dtype, np.datetime64):
df[fldname] = fld = pd.to_datetime(fld, infer_datetime_format=True, errors=errors)
targ_pre = re.sub('[Dd]ate$', '', fldname)
attr = ['Year', 'Month', 'Week', 'Day', 'Dayofweek', 'Dayofyear',
'Is_month_end', 'Is_month_start', 'Is_quarter_end', 'Is_quarter_start', 'Is_year_end', 'Is_year_start']
if time: attr = attr + ['Hour', 'Minute', 'Second']
for n in attr: df[targ_pre + n] = getattr(fld.dt, n.lower())
df[targ_pre + 'Elapsed'] = fld.astype(np.int64) // 10 ** 9
if drop: df.drop(fldname, axis=1, inplace=True)
|
from functions import box_volume, ball_volume, pipe_volume
try:
running = True
while running:
shape = int(input("Annan kappaleen muoto:\n"
"(1 = laatikko, 2 = pallo, 3 = putki."
" 0 = lopetetaan ohjelman käyttäminen)\n"))
if shape == 0:
print("Kiitos ohjelman käytöstä!")
running = False
elif shape == 1:
width = float(input("Anna laatikon leveys (m):\n"))
height = float(input("Anna laatikon korkeus (m):\n"))
depth = float(input("Anna laatikon syvyys (m):\n"))
volume = box_volume(width, height, depth)
print(f"Laatikon tilavuus: {volume} m3.\n")
elif shape == 2:
radius = float(input("Anna pallon säde (m):\n"))
volume = ball_volume(radius)
print(f"Pallon tilavuus: {volume} m3.\n")
elif shape == 3:
radius = float(input("Anna lieriön pohjan säde (m):\n"))
lenght = float(input("Anna lieriön pituus (m): \n"))
volume = pipe_volume(radius, lenght)
print(f"Lieriön tilavuus: {volume} m3.\n")
else:
print("Väärä muoto!")
except ValueError:
print("Väärä muoto!")
|
# coding=utf-8
# Batch-resize images
# imageResize(r"D:\tmp", r"D:\tmp\3", 0.7)
from PIL import Image
import os
def imageResize(input_path, output_path, scale):
    # List everything in the input directory and switch the working directory to it
    files = os.listdir(input_path)
    os.chdir(input_path)  # change the current working directory to input_path
    # Create the output directory if it does not exist
if (not os.path.exists(output_path)):
os.makedirs(output_path)
for file in files:
        # Only process regular files; skip directories
if (os.path.isfile(file)):
img = Image.open(file)
# width = int(img.size[0] * scale)
# height = int(img.size[1] * scale)
width = 256
height = 256
img = img.resize((width, height), Image.ANTIALIAS)
img.save(os.path.join(output_path, file))
return 0
imageResize('/Users/muscle/Desktop/SIP/test_images','/Users/muscle/Desktop/SIP/test_images',0)
|
# Method 1: call Process directly
import time
import random
from multiprocessing import Process
def run(name):
print('%s runing' %name)
time.sleep(random.randrange(1,50))
print('%s running end' %name)
p1=Process(target=run,args=('anne',)) # the trailing comma is required (args must be a tuple)
p2=Process(target=run,args=('alice',))
p3=Process(target=run,args=('biantai',))
p4=Process(target=run,args=('haha',))
p1.start()
p2.start()
p3.start()
p4.start()
print('主线程')
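# Note (not from the original example): on Windows and macOS the default start method
# is "spawn", which re-imports this module in every child process, so process creation
# is normally wrapped in a main guard, e.g.:
#
#     if __name__ == '__main__':
#         p1 = Process(target=run, args=('anne',))
#         p1.start()
#         p1.join()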
|
from typing import List
class Solution:
def moveZeroes(self, nums: List[int]) -> None:
nums[:] = [n for n in nums if n != 0] + [0] * nums.count(0)
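# Illustrative in-place behaviour check (not from the original solution).
if __name__ == '__main__':
    nums = [0, 1, 0, 3, 12]
    Solution().moveZeroes(nums)
    assert nums == [1, 3, 12, 0, 0]
    print(nums)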
|
def encode(plain_text):
encoded = ''
chuncksize = 5
for char in plain_text.lower():
encoded += flip(char)
chuncked = ''
counter = chuncksize
for char in encoded:
chuncked += char
counter -= 1
if counter == 0:
chuncked += ' '
counter = chuncksize
if chuncked[-1] == ' ':
chuncked = chuncked[:-1]
return chuncked
def decode(ciphered_text):
temp = ''
for char in ciphered_text.lower():
temp += flip(char)
return temp
def flip(char):
lower_base = ord('a')
    alphabet_size = 25  # highest zero-based letter index ('z' == 25)
if char.islower():
return chr(alphabet_size -(ord(char) - lower_base)
+ lower_base)
elif char in ',._ ':
return ''
else:
return char
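# Small self-check (not from the original solution); the expected values follow from
# the encode/decode rules above.
if __name__ == '__main__':
    assert encode('yes') == 'bvh'
    assert encode('Testing, 1 2 3, testing.') == 'gvhgr mt123 gvhgr mt'
    assert decode('gvhgr mt123 gvhgr mt') == 'testing123testing'
    print('atbash self-check passed')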
|
import pygame
import math
from pygame.sprite import Sprite
from bullet_ship import Bullet
# There's a bug in the invincibility frames...
class Ship(Sprite):
def __init__(self, main_game_class):
"""Initialize the ship and its starting positions"""
super().__init__()
# Load parent's class necessary attributes. I think that is a correct term for it...
self.main_game = main_game_class
self.screen = self.main_game.screen
self.screen_rect = self.screen.get_rect()
self.settings = self.main_game.settings
self.image = pygame.image.load("images/ship.bmp")
self.core = pygame.image.load("images/core.bmp")
self.ship_rect = self.image.get_rect()
self.rect = self.core.get_rect()
# self.bombs = self.settings.bomb_limit
# Start the ship at the middle bottom of the screen. Place the core in the middle of the ship
self.ship_rect.midbottom = self.screen_rect.midbottom
self.rect.centerx = self.ship_rect.centerx
self.rect.top = self.ship_rect.top + 5
self.hit_box_surface = pygame.Surface(size=(4, 4))
self.hit_box = self.hit_box_surface.get_rect()
# Mask for the true tiny hitbox of 4px
self.mask = pygame.mask.from_surface(self.core)
# Store decimal values for the ship positions
self.x = float(self.ship_rect.x)
self.y = float(self.ship_rect.y)
# Flags:
self.god_mode = False
self.start_respawn_time = pygame.time.get_ticks()
# Invisibility settings
self.alpha = 255 # 255: opaque, 0: transparent
self.god_time = self.settings.god_time
self.frame_timer = 0 # For decreasing and increasing alpha
self.alpha_modifier = 255 // (self.settings.FPS // 4)
self.up = False # Boolean to detect whether to increase opacity or not
# Shoot bullet attributes
self.bullet_cooldown = self.settings.ship_bullet_cooldown
self.shoot_disabled = False
self.last_bullet_fired = pygame.time.get_ticks()
def update(self):
"""Full movement in 2D space whoop. Will add Shift for slower movement later"""
# Up - Left, Down-Right not firing bullets for some reason...
# The above problem is due to ghosting issues on the keyboard...
keys = pygame.key.get_pressed()
self._movement_update(keys)
self._shoot_update(keys)
self.ship_rect.x = self.x
self.ship_rect.y = self.y
self.rect.centerx = self.ship_rect.centerx
self.rect.top = self.ship_rect.top + 5
self.hit_box.center = self.rect.center
def _movement_update(self, keys):
if keys[pygame.K_d] and self.rect.right <= self.screen_rect.right:
self._update_vertical(keys)
self.x += 1 * self.settings.ship_speed
elif keys[pygame.K_a] and self.rect.left >= self.screen_rect.left:
self._update_vertical(keys)
self.x -= 1 * self.settings.ship_speed
elif keys[pygame.K_w] and self.rect.top >= self.screen_rect.top:
self.y -= 1 * self.settings.ship_speed
elif keys[pygame.K_s] and self.rect.bottom <= self.screen_rect.bottom:
self.y += 1 * self.settings.ship_speed
def _update_vertical(self, keys):
if keys[pygame.K_w] and self.ship_rect.top >= self.screen_rect.top:
self.y -= 1 * self.settings.ship_speed
elif keys[pygame.K_s] and self.ship_rect.bottom <= self.screen_rect.bottom:
self.y += 1 * self.settings.ship_speed
def draw_ship(self):
self._check_invisibility_time() # Blinking is a part of ship drawing, so I put it here.
if self.god_mode:
self._blink_ship()
self.screen.blit(self.image, self.ship_rect)
self.screen.blit(self.hit_box_surface, self.hit_box)
self.screen.blit(self.core, self.rect)
def _blink_ship(self):
"""To denote that the ship is invincible after respawning"""
# Decrease then increase transparency
if self.frame_timer == self.settings.FPS // 4:
self.frame_timer = 0
self.up = not self.up # Reverse boolean. Ship will go transparent first anyway
if self.up:
self.alpha += self.alpha_modifier
else:
self.alpha -= self.alpha_modifier
self.image.set_alpha(self.alpha)
self.frame_timer += 1
def _check_invisibility_time(self):
now = pygame.time.get_ticks()
# I think the god_mode bug is in here... I think I didn't put the start_respawn_timer here
if now - self.start_respawn_time >= self.god_time:
self.god_mode = False
self.frame_timer = 0
self.up = False
self.alpha = 255
self.image.set_alpha(255) # Put the ship into opaque mode, now.
def respawn_ship(self):
"""When ship is hit, respawn it at the midbottom of the screen.
In the future, will give invisbility_time. Maybe do some blinking as well."""
self.ship_rect.midbottom = self.screen_rect.midbottom
self.rect.centerx = self.ship_rect.centerx
self.rect.y = self.ship_rect.y + 5
self.x = float(self.ship_rect.x)
self.y = float(self.ship_rect.y)
self.hit_box.center = self.rect.center
def _fire_bullet(self):
if len(self.main_game.bullets) < math.floor(self.settings.boolet_limit):
bullet = Bullet(self.main_game)
self.main_game.bullets.add(bullet)
def _shoot_update(self, keys):
self._check_bullet_cooldown()
if keys[pygame.K_RETURN] and not self.shoot_disabled:
self._fire_bullet()
self.shoot_disabled = True
def _check_bullet_cooldown(self):
"""Yeah, I don't want it to turn into a lazer beam of ultimate lethality"""
time_now = pygame.time.get_ticks()
if time_now - self.last_bullet_fired >= self.bullet_cooldown:
self.shoot_disabled = False
self.last_bullet_fired = pygame.time.get_ticks()
|
from flask_wtf import FlaskForm as Form
from wtforms import PasswordField, StringField, SubmitField, BooleanField, SelectField, IntegerField, FileField, DateTimeField, HiddenField, FloatField
from wtforms.validators import DataRequired, Length, EqualTo, ValidationError, NumberRange, InputRequired
from flask_wtf.file import FileAllowed, FileRequired
from app.db_classes import Therapists, Patients
from datetime import datetime
class LoginForm(Form):
username = StringField('Username', validators=[DataRequired(), Length(min=3, max=19)])
password = PasswordField('Password', validators=[DataRequired(), Length(max=19)])
remember_me = BooleanField("Remember me")
submit = SubmitField('Log In')
class SignUpForm(Form):
title = SelectField(u'Select your title at the hospital', choices=[('nrs', 'nurse'), ('dr', 'Dr.')])
username = StringField('Username', validators=[DataRequired(), Length(min=6, max=20)])
    password = PasswordField('Password', validators=[DataRequired(), Length(max=20)])
password2 = PasswordField('Repeat the password', validators=[DataRequired(), Length(max=19), EqualTo('password')])
fname = StringField("First Name", validators=[DataRequired(), Length(max=20)])
lname = StringField("Last Name", validators=[DataRequired(), Length(max=20)])
submit = SubmitField('Sign Up')
def validate_username(self, username):
user = Therapists.query.filter_by(username=username.data).first()
if user is not None:
raise ValidationError('Please use a different username.')
class AddPatientForm(Form):
fname = StringField("First Name", validators=[DataRequired(), Length(max=20)])
lname = StringField("Last Name", validators=[DataRequired(), Length(max=20)])
ssn = StringField("SSN", validators=[Length(max=9)])
gender = SelectField("Patient's gender", choices=[("f", "female"), ("m", "male")])
age = IntegerField("Patient's age")
#therapist_id = SelectField(u'Select therapist', coerce=int, validators=[InputRequired()])
submit = SubmitField('Submit')
# When you add any methods that match the pattern validate_<field_name>,
# WTForms takes those as custom validators
# and invokes them in addition to the stock validators.
def validate_ssn(self, ssn):
user = Patients.query.filter_by(ssn=ssn.data).first()
if user is not None:
            raise ValidationError('Error adding new patient. Check that a patient with this SSN does not already exist.')
class ImageForm(Form):
photo = FileField(label="Select image", validators=[FileRequired(), FileAllowed(['jpg', 'png', 'jpeg'], 'Images only!')])
patient_id = SelectField(u'Select patient', coerce=int, validators=[InputRequired()])
datetime = DateTimeField("Select datetime of uplodaded image. Default is now()", default=datetime.today)
im_type = SelectField(u'Select image type', coerce=int, validators=[InputRequired()])
analyze = BooleanField("Send to analysis module")
submit = SubmitField('Upload')
class PatientsForm(Form):
    # add validation to these fields to be careful when
    # querying data and putting it into the url
search_field = StringField('Search for Patients', render_kw={"placeholder": "'Name Surname' OR SSN ('123456789')"})
sort_by_ssn = BooleanField("using ssn", default=False)
submit = SubmitField('Search')
class EditImgAnalysisForm(Form):
img_id = HiddenField("Image id")
tumor = StringField("Tumor analysis:")
diagnosis = SelectField(u'Diagnosis', coerce=int, validators=[InputRequired()])
recommendations = StringField("Recommendations")
confidence = FloatField("Confidence")
verified = BooleanField("Verified", default=True)
submit = SubmitField('Save changes')
|
from redis import StrictRedis
import json
from src import env
CACHED_CARDS = 'cards'
class Redis:
def __init__(self):
self.redis = StrictRedis(
host=env.get_redis_host(),
port=env.get_redis_port(),
password=env.get_redis_password()
)
    def get_cached_data(self, cache_key):
        data = self.redis.get(cache_key)
        return json.loads(data) if data is not None else None
def cache_data(self, cache_key, data):
self.redis.set(cache_key, data if type(data) == str else json.dumps(data))
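# Illustrative round-trip (not from the original module); assumes src.env supplies valid
# connection settings and that a Redis server is reachable when run directly.
if __name__ == '__main__':
    cache = Redis()
    cache.cache_data(CACHED_CARDS, [{'id': 1, 'name': 'Ace'}])
    print(cache.get_cached_data(CACHED_CARDS))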
|
import module
from pydub.playback import play
from pydub import AudioSegment
import threading
import time
class Pad:
"""
Pad class for playing and modifying sounds
"""
def __init__(self):
"""
Currently empty, can be useful in the future
"""
pass
def play_sound(self, btn):
"""
Inputs:
btn (int): Pressed button
Plays sounds according to pressed button in a separate thread
"""
threading.Thread(target=play, args=(module.sounds[btn],)).start()
time.sleep(0.1)
def load_sounds(self):
"""
Loads sounds from either USB or from .sounds folder
        ToDo maybe add it to __init__? What if USB or .sounds doesn't exist?
"""
for i in range(16):
try:
sound = AudioSegment.from_wav(f"/media/usb/{str(i+1)}.wav")
module.sound_path = '/media/usb'
except FileNotFoundError:
sound = AudioSegment.from_wav(f"./sounds/{str(i+1)}.wav")
module.sound_path = './sounds'
module.sounds.append(sound)
module.usb_sounds = module.sounds
print(f'{module.current_time()} Sounds path: {module.sound_path}')
def toggle_pitch_mode(self):
"""
        Creates different pitches for the selected sound object
        ToDo: currently pitches only get higher, never lower -- `octaves` below runs
        from 2 down to 0.5, so the resample factor 2.0 ** octaves never drops below 1.
"""
if not module.selected: # Sound needs to be selected
print(f'{module.current_time()} Please select a sound first')
time.sleep(0.3)
return
if module.pitch_mode: # Turns off pitch mode if it's already active
module.sounds = module.usb_sounds
module.pitch_mode = False
time.sleep(0.3)
return
module.pitch_mode = True
out_sounds = []
octaves = 2
for i in range(16):
new_sample_rate = int(module.usb_sounds[module.selected-1].frame_rate * (2.0 ** octaves))
sound_resample = module.usb_sounds[module.selected-1]._spawn(module.usb_sounds[module.selected-1].raw_data,
overrides={'frame_rate': new_sample_rate})
sound = sound_resample.set_frame_rate(44100) # ToDo see if that's the problem why pitch only gets higher
out_sounds.append(sound)
octaves -= 0.1
module.sounds = out_sounds
time.sleep(0.3)
|
num1 =int(input("첫 번째 수 : "))
num2 =int(input("두 번째 수 : "))
print("%d ** %d = %d"%(num1,num2,num1**num2))
print("%d // %d = %d"%(num1,num2,num1//num2)) #몫
print("%d / %d = %.2f"%(num1,num2,num1/num2)) #나누기
print("%d %% %d = %d"%(num1,num2,num1%num2)) #나머지
#%를 출력할때 %%두번 적어줍니다.
#%가 포맷코드의 시작 글자 => %%적어줘야지 이거 포맷코드가 아니라 글자구나~ 인식
|
import argparse
import sys
from heppy.pythiautils import configuration as pyconf
def get_args_from_settings(ssettings):
sys.argv=[' '] + ssettings.split()
parser = argparse.ArgumentParser(description='pythia8 fastjet on the fly')
pyconf.add_standard_pythia_args(parser)
parser.add_argument('--output', default="test_ang_ue.root", type=str)
parser.add_argument('--user-seed', help='pythia seed', default=1111, type=int)
args = parser.parse_args()
return args
def pythia_init_from_string(ssettings, mycfg = []):
# example: ssettings = "--py-ecm 5000 --user-seed=100000 --nev 1000"
mycfg = []
args = get_args_from_settings(ssettings)
pythia8 = pyconf.create_and_init_pythia_from_args(args, mycfg)
return pythia8
|
import pandas as pd
import numpy as np
from pandas import DataFrame
from sklearn import linear_model
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_boston
boston= load_boston()
df_x = pd.DataFrame(boston.data, columns=boston.feature_names)
df_y=pd.DataFrame(boston.target)
reg=linear_model.LinearRegression()
x_train,x_test,y_train,y_test=train_test_split(df_x,df_y,test_size=0.25,random_state=4)
reg.fit(x_train,y_train)
print(reg.score(x_test,y_test))
pca=PCA(n_components=10, whiten=True)
x = pca.fit(df_x).transform(df_x)
print(x)
|
import configparser
from datetime import datetime
import os
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
from pyspark.sql.types import TimestampType
config = configparser.ConfigParser()
config.read('dl.cfg')
os.environ['AWS_ACCESS_KEY_ID']=config['AWS']['AWS_ACCESS_KEY_ID']
os.environ['AWS_SECRET_ACCESS_KEY']=config['AWS']['AWS_SECRET_ACCESS_KEY']
os.environ['REGION']=config['AWS']['REGION']
def create_session():
spark = SparkSession \
.builder \
.config("spark.jars.packages", "org.apache.hadoop:hadoop-aws:2.7.0") \
.getOrCreate()
print("********* SPARK****************")
print(spark)
return spark
def process_song_data(spark,input_data,output_data):
print("*******Inicio********")
song_data = input_data + "song_data/A/B/C/*.json"
df_song = spark.read.json(song_data)
#SONG TABLE
songs_table = df_song.select("song_id", "title", "artist_id", "year", "duration").distinct()
    #ARTIST TABLE
artist_table = df_song.selectExpr("artist_id", "artist_name name", "artist_location location","artist_latitude latitude", "artist_longitude longitude")
    #SAVE
path_song = output_data + "song_table.parquet"
path_artist = output_data + "artist_table.parquet"
songs_table.write.parquet(path=path_song,partitionBy = ("year", "artist_id"),mode = "overwrite")
artist_table.write.parquet(path=path_artist,mode = "overwrite")
print("******Se guardo correctamente*******")
return df_song
def process_long_data(spark,input_data,output_data):
log_data = input_data + "log_data/2018/11/*.json"
df_long = spark.read.json(log_data)
song_data = input_data + "song_data/A/B/C/*.json"
df_song = spark.read.json(song_data)
#Long data
df_long = df_long.filter(df_long.page == "NextSong")
    #Convert ts to a timestamp
get_datetime = udf(lambda x: datetime.fromtimestamp(x / 1000), TimestampType())
df_long = df_long.withColumn("start_time", get_datetime(df_long.ts))
#USER TABLE
user_table = df_long.selectExpr("userId user_id", "firstName first_name", "lastName last_name", "gender", "level").distinct()
#TIME TABLE
table_time = df_long.select("start_time").distinct()
get_hour = udf(lambda x: x.hour)
table_time = table_time.withColumn("hour", get_hour(table_time.start_time))
get_day = udf(lambda x: x.day)
table_time = table_time.withColumn("day", get_day(table_time.start_time))
get_week = udf(lambda x: x.isocalendar()[1])
table_time = table_time.withColumn("week", get_week(table_time.start_time))
get_month = udf(lambda x: x.month)
table_time = table_time.withColumn("month", get_month(table_time.start_time))
get_year = udf(lambda x: x.year)
table_time = table_time.withColumn("year", get_year(table_time.start_time))
get_weekday = udf(lambda x: x.weekday())
table_time = table_time.withColumn("weekday", get_weekday(table_time.start_time))
#SONG_PLAY TABLE
cond = [df_long.artist == df_song.artist_name,df_long.song == df_song.title]
df_join = df_long.join(df_song, cond, "inner")
    #Add an identifier column to the SONG_PLAY table
df_join = df_join.withColumn("songplay_id", monotonically_increasing_id())
    #SONG_PLAY table
song_play_table = df_join.selectExpr("songplay_id","start_time","userId user_id","level","song_id","artist_id","sessionId session_id","location","userAgent user_agent")
    #SAVE
path_user = output_data + "user_table.parquet"
path_time = output_data + "time_table.parquet"
path_songPlay = output_data + "songPlay_table.parquet"
user_table.write.parquet(path=path_user, mode = "overwrite" )
table_time.write.parquet(path=path_time,mode = "overwrite")
song_play_table.write.parquet(path=path_songPlay,mode = "overwrite")
print("*******Se guardo correctamente********")
def main():
spark = create_session()
input_data = "s3a://udacity-dend/"
output_data = "s3a://helaine-data-lake/parquet_files/"
#df_song = process_song_data(spark, input_data, output_data)
process_long_data(spark, input_data, output_data)
spark.stop()
if __name__ == "__main__":
main()
|
"""Core authentication views."""
import logging
import oath
from django.conf import settings
from django.http import (
HttpResponse, HttpResponseRedirect, Http404, JsonResponse)
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import translation
from django.utils.encoding import force_bytes
from django.utils.html import escape
from django.utils.http import urlsafe_base64_encode
from django.utils.translation import gettext as _
from django.views import generic
from django.views.decorators.cache import never_cache
from django.contrib.auth import (
authenticate, login, logout, views as auth_views
)
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.auth.tokens import default_token_generator
import django_otp
from modoboa.core import forms
from modoboa.core.password_hashers import get_password_hasher
from modoboa.lib import cryptutils
from modoboa.lib.views import UserFormKwargsMixin
from modoboa.parameters import tools as param_tools
from .. import models
from .. import sms_backends
from .. import signals
from .base import find_nextlocation
logger = logging.getLogger("modoboa.auth")
def dologin(request):
"""Try to authenticate."""
error = None
if request.method == "POST":
form = forms.LoginForm(request.POST)
if form.is_valid():
logger = logging.getLogger("modoboa.auth")
user = authenticate(username=form.cleaned_data["username"],
password=form.cleaned_data["password"])
if user and user.is_active:
condition = (
user.is_local and
param_tools.get_global_parameter(
"update_scheme", raise_exception=False)
)
if condition:
# check if password scheme is correct
scheme = param_tools.get_global_parameter(
"password_scheme", raise_exception=False)
# use SHA512CRYPT as default fallback
if scheme is None:
pwhash = get_password_hasher('sha512crypt')()
else:
pwhash = get_password_hasher(scheme)()
if not user.password.startswith(pwhash.scheme):
logging.info(
_("Password scheme mismatch. Updating %s password"),
user.username
)
user.set_password(form.cleaned_data["password"])
user.save()
if pwhash.needs_rehash(user.password):
logging.info(
_("Password hash parameter missmatch. "
"Updating %s password"),
user.username
)
user.set_password(form.cleaned_data["password"])
user.save()
login(request, user)
if not form.cleaned_data["rememberme"]:
request.session.set_expiry(0)
translation.activate(request.user.language)
logger.info(
_("User '%s' successfully logged in") % user.username
)
signals.user_login.send(
sender="dologin",
username=form.cleaned_data["username"],
password=form.cleaned_data["password"])
response = HttpResponseRedirect(find_nextlocation(request, user))
response.set_cookie(settings.LANGUAGE_COOKIE_NAME, request.user.language)
return response
error = _(
"Your username and password didn't match. Please try again.")
logger.warning(
"Failed connection attempt from '%(addr)s' as user '%(user)s'"
% {"addr": request.META["REMOTE_ADDR"],
"user": escape(form.cleaned_data["username"])}
)
nextlocation = request.POST.get("next", "")
httpcode = 401
else:
form = forms.LoginForm()
nextlocation = request.GET.get("next", "")
httpcode = 200
announcements = signals.get_announcements.send(
sender="login", location="loginpage")
announcements = [announcement[1] for announcement in announcements]
return HttpResponse(
render_to_string(
"registration/login.html", {
"form": form, "error": error, "next": nextlocation,
"annoucements": announcements},
request),
status=httpcode)
dologin = never_cache(dologin)
def dologout(request):
"""Logout current user."""
if not request.user.is_anonymous:
signals.user_logout.send(sender="dologout", request=request)
logger = logging.getLogger("modoboa.auth")
logger.info(
_("User '{}' successfully logged out").format(
request.user.username))
logout(request)
return HttpResponseRedirect(reverse("core:login"))
class PasswordResetView(auth_views.PasswordResetView):
"""Custom view to override form."""
form_class = forms.PasswordResetForm
def setup(self, request, *args, **kwargs):
super().setup(request, *args, **kwargs)
self.from_email = request.localconfig.parameters.get_value(
"sender_address"
)
def get_context_data(self, **kwargs):
"""Include help text."""
context = super().get_context_data(**kwargs)
context["announcement"] = (
self.request.localconfig.parameters
.get_value("password_recovery_msg")
)
return context
def form_valid(self, form):
"""Redirect to code verification page if needed."""
sms_password_recovery = (
self.request.localconfig.parameters
.get_value("sms_password_recovery")
)
if not sms_password_recovery:
return super().form_valid(form)
user = models.User._default_manager.filter(
email=form.cleaned_data["email"], phone_number__isnull=False
).first()
if not user:
# Fallback to email
return super().form_valid(form)
backend = sms_backends.get_active_backend(
self.request.localconfig.parameters)
secret = cryptutils.random_hex_key(20)
code = oath.totp(secret)
text = _(
"Please use the following code to recover your Modoboa password: {}"
.format(code)
)
if not backend.send(text, [str(user.phone_number)]):
return super().form_valid(form)
self.request.session["user_pk"] = user.pk
self.request.session["totp_secret"] = secret
return HttpResponseRedirect(reverse("password_reset_confirm_code"))
class VerifySMSCodeView(generic.FormView):
"""View to verify a code received by SMS."""
form_class = forms.VerifySMSCodeForm
template_name = "registration/password_reset_confirm_code.html"
def get_form_kwargs(self):
"""Include totp secret in kwargs."""
kwargs = super().get_form_kwargs()
try:
kwargs.update({"totp_secret": self.request.session["totp_secret"]})
except KeyError:
raise Http404
return kwargs
def form_valid(self, form):
"""Redirect to reset password form."""
user = models.User.objects.get(pk=self.request.session.pop("user_pk"))
self.request.session.pop("totp_secret")
token = default_token_generator.make_token(user)
uid = urlsafe_base64_encode(force_bytes(user.pk))
url = reverse("password_reset_confirm", args=[uid, token])
return HttpResponseRedirect(url)
class ResendSMSCodeView(generic.View):
"""A view to resend validation code."""
def get(self, request, *args, **kwargs):
sms_password_recovery = (
self.request.localconfig.parameters
.get_value("sms_password_recovery")
)
if not sms_password_recovery:
raise Http404
try:
user = models.User._default_manager.get(
pk=self.request.session["user_pk"])
except KeyError:
raise Http404
backend = sms_backends.get_active_backend(
self.request.localconfig.parameters)
secret = cryptutils.random_hex_key(20)
code = oath.totp(secret)
text = _(
"Please use the following code to recover your Modoboa password: {}"
.format(code)
)
if not backend.send(text, [user.phone_number]):
raise Http404
self.request.session["totp_secret"] = secret
return JsonResponse({"status": "ok"})
class TwoFactorCodeVerifyView(LoginRequiredMixin,
UserFormKwargsMixin,
generic.FormView):
"""View to verify a 2FA code after login."""
form_class = forms.Verify2FACodeForm
template_name = "registration/twofactor_code_verify.html"
def form_valid(self, form):
"""Login user."""
django_otp.login(self.request, form.cleaned_data["tfa_code"])
return HttpResponseRedirect(
find_nextlocation(self.request, self.request.user)
)
|
# Facebook Msg
verify_token = 'YOUR_VERIFY_TOKEN'
token = "YOUR_TOKEN"
# Database
DATABASE_NAME = 'yumngein'
DATABASE_USER = 'yumngeinadmin'
DATABASE_HOST = 'localhost'
DATABASE_PASSWORD = 'myBestPassword'
DATABASE_STRING_FORM = "postgresql://{}:{}@{}:5432/{}"
DATABASE_STRING = DATABASE_STRING_FORM.format(DATABASE_USER, DATABASE_PASSWORD,
DATABASE_HOST, DATABASE_NAME)
def getDatabaseString():
return DATABASE_STRING
|
def hor_mirror(s):
return '\n'.join(reversed(s.split('\n')))
def vert_mirror(s):
return '\n'.join(a[::-1] for a in s.split('\n'))
def oper(fct, s):
return fct(s)
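# Quick demonstration (not from the original snippet) of the helpers above.
if __name__ == '__main__':
    s = 'abc\ndef'
    print(oper(hor_mirror, s))   # -> 'def\nabc'
    print(oper(vert_mirror, s))  # -> 'cba\nfed'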
|
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
class Neuron(object):
def __init__(self, w_num):
self.input = []
self.output = 0
self.weights = []
self.bias = 0
self.delta = 0
self.bias = np.random.randn()
for i in range(w_num):
self.weights.append(np.random.randn())
def forward(self, inp):
if len(inp) != len(self.weights):
raise Exception("input and weight's length mismatch!")
self.input = inp
self.output = 0
for i in range(len(self.input)):
self.output += self.weights[i] * self.input[i]
self.output = self.sigmoid(self.output + self.bias)
return self.output
def sigmoid(self, x):
return 1/(1+np.exp(-x))
def derivative_sigmoid(self, x):
return np.multiply(x, 1.0 - x)
def cal_delta(self, error):
self.delta = error * self.derivative_sigmoid(self.output)
def update(self, learnRate):
for i in range(len(self.weights)):
self.weights[i] -= learnRate * self.delta * self.input[i]
self.bias -= learnRate * self.delta
# In[3]:
class NeuralLayer(object):
def __init__(self, input_ch, neuron_num):
self.neurons = []
for i in range(neuron_num):
neuron = Neuron(input_ch)
self.neurons.append(neuron)
def forward(self, inp):
output = []
for i in range(len(self.neurons)):
output.append(self.neurons[i].forward(inp))
return output
def get_deltas(self, pre_layer):
deltas = []
pre_l_neurons = pre_layer.neurons
for i in range(len(self.neurons)):
error = 0
for pre_l_neuron in pre_l_neurons:
error += pre_l_neuron.delta * pre_l_neuron.weights[i]
self.neurons[i].cal_delta(error)
deltas.append(self.neurons[i].delta)
return deltas
def update(self, learnRate):
for neuron in self.neurons:
neuron.update(learnRate)
# In[4]:
class NeuralNetwork(object):
def __init__(self, learnRate, debug=False):
self.layers = []
self.learnRate = learnRate
self.debug = debug
def train(self, dataset):
inputs, labels = dataset
for i in range(len(inputs)):
self.forward(inputs[i])
self.backpropagate(labels[i])
self.update()
return labels[i] - self.layers[-1].neurons[0].output
def forward(self, inp):
x = inp
for i in range(len(self.layers)):
x = self.layers[i].forward(x)
if self.debug:
print("Layer {0} output {1}".format(i, x))
return x
def backpropagate(self, label):
last_layer = None
deltas = []
for i in range(len(self.layers), 0, -1):
current_layer = self.layers[i-1]
if last_layer is None:
for i in range(len(current_layer.neurons)):
error = -(label - current_layer.neurons[i].output)
current_layer.neurons[i].cal_delta(error)
else:
deltas = current_layer.get_deltas(last_layer)
last_layer = current_layer
if self.debug:
print("Layer {0} deltas {1}".format(i, deltas))
def update(self):
for layer in self.layers:
layer.update(self.learnRate)
def predict(self, inp):
return self.forward(inp)
def add_layer(self, layer):
self.layers.append(layer)
def set_lr(self, lr):
self.learnRate = lr
# In[5]:
def generate_linear(n=100):
pts = np.random.uniform(0, 1, (n, 2))
inputs = []
labels = []
for pt in pts:
inputs.append([pt[0], pt[1]])
distance = (pt[0]-pt[1])/1.414
if pt[0] > pt[1]:
labels.append(0)
else:
labels.append(1)
return inputs, labels
def generate_XOR_easy():
inputs = []
labels = []
for i in range(11):
inputs.append([0.1*i, 0.1*i])
labels.append(0)
if 0.1*i == 0.5:
continue
inputs.append([0.1*i, 1-0.1*i])
labels.append(1)
return inputs, labels
# In[6]:
def show_result(inputs, labels, pred_y):
plt.subplot(1,2,1)
plt.title('Ground truth', fontsize=18)
for i in range(len(inputs)):
if labels[i] - 0 < 0.2:
plt.plot(inputs[i][0], inputs[i][1], 'ro')
else:
plt.plot(inputs[i][0], inputs[i][1], 'bo')
plt.subplot(1,2,2)
plt.title('Predict result', fontsize=18)
for i in range(len(inputs)):
if pred_y[i] - 0 < 0.2:
plt.plot(inputs[i][0], inputs[i][1], 'ro')
else:
plt.plot(inputs[i][0], inputs[i][1], 'bo')
plt.show()
# In[7]:
learnRate = 0.5
epoch = 1500
dataset = generate_linear(100)
input_channel = len(dataset[0][0])
layers = [input_channel,4,4,1]
neuron_layer = []
nn = NeuralNetwork(learnRate = learnRate, debug=False)
for i in range(len(layers)-1):
weight = []
bias = np.random.randn()
for j in range(layers[i]*layers[i+1]):
weight.append(np.random.randn())
layer = NeuralLayer(input_ch=layers[i], neuron_num=layers[i+1])
nn.add_layer(layer)
for i in range(epoch):
loss = nn.train(dataset)
loss_sum = 0
if i % 100 == 0:
loss_sum = np.mean(loss_sum + loss)
print("epoch {0} loss {1}".format(i, loss_sum))
else:
loss_sum += loss
# In[11]:
#test
inputs, labels = dataset
pred_y = []
error_count = 0
for i in range(len(inputs)):
prediction = nn.predict(inputs[i])[0]
#print("input {0} , label {1}, predict {2}".format(inputs[i], labels[i], prediction))
pred_y.append(prediction)
    if abs(prediction - labels[i]) > 0.2:
error_count += 1
print("error/total: {0}/{1}".format(error_count, len(inputs)))
# In[12]:
for i in pred_y:
print(i)
# In[13]:
show_result(inputs, labels, pred_y)
|
from django.apps import AppConfig
class TraintestConfig(AppConfig):
name = 'traintest'
|
'''
String slicing, str[::-1], reverses the string.
Lists and other sequences can be reversed the same way.
Note: integers in Python 3 have no built-in range limit, so the 32-bit range
-2^31 ~ 2^31 - 1 has to be enforced manually.
'''
class Solution:
def reverse(self, x: int) -> int:
if x == 0:
return 0
str_x = str(x)
result = ''
if str_x[0] == '-':
result = result + '-'
        result = result + str_x[::-1].lstrip("0").rstrip("-") # drop x's leading '-' and trailing zeros (reversed here)
result = int(result)
if result < -2**31 or result > 2**31 - 1:
return 0
else:
return result
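# A few illustrative cases (not from the original solution), including the 32-bit overflow rule.
if __name__ == '__main__':
    s = Solution()
    assert s.reverse(123) == 321
    assert s.reverse(-120) == -21
    assert s.reverse(1534236469) == 0  # 9646324351 exceeds 2**31 - 1
    print('reverse() self-check passed')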
|
###MODULES###
import numpy as np
import pandas as pd
import os, sys
import time as t
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.ticker import MaxNLocator
import pathlib
from matplotlib.colors import Normalize
from scipy import interpolate
norm = Normalize()
from resource import getrusage, RUSAGE_SELF
import random
mpl.rcParams['axes.linewidth'] = 1.5 #set the value globally
#CONSTANTS
cwd_PYTHON = os.getcwd() + '/'
RHO = 1000.0
DX = 0.025/256.0
NX = 512
PERIOD = 0.1
RADIUSLARGE = 0.002
RADIUSSMALL = 0.5*RADIUSLARGE
maxR = 0.025/RADIUSLARGE
csfont = {'fontname':'Times New Roman'}
#System Arguments
Theta = sys.argv[1]
Hx = sys.argv[2]
Hy = sys.argv[3]
perNumber = int(sys.argv[4])#5
#PARTICLE STREAMLINE CONSTANTS
nFrames = 240
minVal, maxVal = -6.0,6.0
rows, cols = 100, 100
nPart, nTrail = rows*cols, 80
timestep = 0.2/60.0
dX = 2.0*maxR/(1.0*NX)
seed = random.seed(11235)
cwd_FIGS = cwd_PYTHON+'../Figures/ParticleStreamline/L/per{0}/'.format(perNumber)#cwd_PYTHON+"../../Figures/ParticleStreamline/TestField2/"
pathlib.Path(cwd_FIGS).mkdir(parents=True, exist_ok=True)
cwd_Re = cwd_PYTHON+'Theta{0}/Hx{1}/Hy{2}/VTK/AVG/'.format(Theta,Hx,Hy)#cwd_PYTHON+'../../FieldData/TestField/'
cwd_POS = cwd_PYTHON+'Theta{0}/Hx{1}/Hy{2}/'.format(Theta,Hx,Hy)#cwd_POS = cwd_PYTHON+'../../FieldData/TestField/'
# constructs a filepath for the pos data of Re = $Re
def pname(cwd):
#return cwd+"/pd.txt"
#cwd = cwd_PYTHON
return cwd+"pd.txt"
def GetPosData(cwd,time,parTheta,parHx,parHy):
global RADIUSLARGE
#Load position data
pdData = pd.read_csv(pname(cwd),delimiter=' ')
#Split up individual sphere data by given index
UAdata = pdData[pdData['idx'] == 6].copy()
LAdata = pdData[pdData['idx'] == 19].copy()
UBdata = pdData[pdData['idx'] == 32].copy()
LBdata = pdData[pdData['idx'] == 45].copy()
#Sort data by time and reset indices
UAdata = UAdata.sort_values(by=['time'])
LAdata = LAdata.sort_values(by=['time'])
UBdata = UBdata.sort_values(by=['time'])
LBdata = LBdata.sort_values(by=['time'])
UAdata = UAdata.reset_index(drop=True)
LAdata = LAdata.reset_index(drop=True)
UBdata = UBdata.reset_index(drop=True)
LBdata = LBdata.reset_index(drop=True)
#Rename columns to previous data frames
UAdata = UAdata.rename(columns={"x":"aXU", "y":"aYU"})
LAdata = LAdata.rename(columns={"x":"aXL", "y":"aYL"})
UBdata = UBdata.rename(columns={"x":"bXU", "y":"bYU"})
LBdata = LBdata.rename(columns={"x":"bXL", "y":"bYL"})
#Combine separate dataframes to create previous dataframe used
splitDict = {'aXU':UAdata['aXU'],'aYU':UAdata['aYU'],'aXL':LAdata['aXL'],'aYL':LAdata['aYL'],
'bXU':UBdata['bXU'],'bYU':UBdata['bYU'],'bXL':LBdata['bXL'],'bYL':LBdata['bYL'],'time':UAdata['time']}
posData = pd.DataFrame(data=splitDict)
pos = posData[posData['time'] == time]
pos = pos.reset_index(drop=True)
#Renormalize
pos['aXU'] /= RADIUSLARGE
pos['aXL'] /= RADIUSLARGE
pos['aYU'] /= RADIUSLARGE
pos['aYL'] /= RADIUSLARGE
pos['bXU'] /= RADIUSLARGE
pos['bXL'] /= RADIUSLARGE
pos['bYU'] /= RADIUSLARGE
pos['bYL'] /= RADIUSLARGE
return pos
def GetPosDataLength(cwd):
data = pd.read_csv(pname(cwd),delimiter=' ')
return len(data['time'])
def GetAvgFieldData(cwd,idx):
global RADIUSLARGE
#Load position data
#Columns
#mx.flat my.flat avgW.flat avgP.flat avgUx.flat avgUy.flat
fieldData = pd.read_csv(cwd+'AVG_%04d.csv'%idx,delimiter=' ')
print(fieldData.head())
#All field values to a list
mxList = fieldData['mx'].values.tolist()
myList = fieldData['my'].values.tolist()
WList = fieldData['avgW'].values.tolist()
PList = fieldData['avgP'].values.tolist()
UxList = fieldData['avgUx'].values.tolist()
UyList = fieldData['avgUy'].values.tolist()
#Convert lists to numpy arrays
#Reshape them to be Nx x Ny
Nx, Ny = 1024, 1024
mxArr = np.array(mxList).reshape((Nx,Ny))/RADIUSLARGE
myArr = np.array(myList).reshape((Nx,Ny))/RADIUSLARGE
WArr = np.array(WList).reshape((Nx,Ny))
PArr = np.array(PList).reshape((Nx,Ny))
UxArr = np.array(UxList).reshape((Nx,Ny))/RADIUSLARGE
UyArr = np.array(UyList).reshape((Nx,Ny))/RADIUSLARGE
return (mxArr, myArr, WArr, PArr, UxArr, UyArr)
def AddDiscsToPlot(ax,pos):
#Add Discs
circle1 = Circle((pos.loc[0,'aXU_rot'], pos.loc[0,'aYU_rot']), 1.0, facecolor=(0.0,)*3,
linewidth=1,alpha=1.0,zorder=6)
ax.add_patch(circle1)
circle2 = Circle((pos.loc[0,'aXL_rot'], pos.loc[0,'aYL_rot']), 0.5, facecolor=(0.0,)*3,
linewidth=1,alpha=1.0,zorder=6)
ax.add_patch(circle2)
circle3 = Circle((pos.loc[0,'bXU_rot'], pos.loc[0,'bYU_rot']), 1.0, facecolor=(0.5,)*3,
linewidth=1,alpha=1.0,zorder=6)
ax.add_patch(circle3)
circle4 = Circle((pos.loc[0,'bXL_rot'], pos.loc[0,'bYL_rot']), 0.5, facecolor=(0.5,)*3,
linewidth=1,alpha=1.0,zorder=6)
ax.add_patch(circle4)
#Add Swimmer "springs"
ax.plot([pos.loc[0,'aXU_rot'],pos.loc[0,'aXL_rot']],
[pos.loc[0,'aYU_rot'],pos.loc[0,'aYL_rot']],
color='black',linewidth=3,zorder=6)
ax.plot([pos.loc[0,'bXU_rot'],pos.loc[0,'bXL_rot']],
[pos.loc[0,'bYU_rot'],pos.loc[0,'bYL_rot']],
color=(0.5,)*3,linewidth=3,zorder=6)
return
def set_size(w,h, ax=None):
""" w, h: width, height in inches """
if not ax: ax=plt.gca()
l = ax.figure.subplotpars.left
r = ax.figure.subplotpars.right
t = ax.figure.subplotpars.top
b = ax.figure.subplotpars.bottom
figw = float(w)/(r-l)
figh = float(h)/(t-b)
ax.figure.set_size_inches(figw, figh)
return ax
def Rotate(xy, theta):
# https://en.wikipedia.org/wiki/Rotation_matrix#In_two_dimensions
#First Rotate based on Theta
#Allocate Arrays
rotationMatrix = np.zeros((2,2))
#Calculate rotation matrix
rotationMatrix[0,0] = np.cos(theta)
rotationMatrix[0,1] = -1.0*np.sin(theta)
rotationMatrix[1,0] = np.sin(theta)
rotationMatrix[1,1] = np.cos(theta)
return rotationMatrix.dot(xy)
def CalcLabAngle(pos):
#Find swimming axis (normal y-axis)
xU, xL = pos.loc[0,'aXU'], pos.loc[0,'aXL']
yU, yL = pos.loc[0,'aYU'], pos.loc[0,'aYL']
labX = xU - xL
labY = yU - yL
length = np.hypot(labX,labY)
normX = labX/length
normY = labY/length
#2) Calculate Theta
if(normX <= 0.0):
theta = np.arccos(normY)
else:
theta = -1.0*np.arccos(normY)+2.0*np.pi
print('theta = ',theta*180.0/np.pi)
return 2.0*np.pi - theta
def InterpolateToNewCoordinateSystem(x,y,mx,my,arrayUx,arrayUy):
#Create a uniform mesh for the interpolated velocity vectors!
mx_new, my_new = np.meshgrid(x,y)
    #Interpolate Ux and Uy from the original cartesian coordinates to the new ones
    #Griddata
    print('About to interpolate field data')
print('peak memory = ',getrusage(RUSAGE_SELF).ru_maxrss)
sys.stdout.flush()
arrayUx_new=interpolate.griddata((mx.flatten(),my.flatten()),arrayUx.flatten() , (mx_new,my_new),method='linear')
print('X transformation complete')
print('peak memory = ',getrusage(RUSAGE_SELF).ru_maxrss)
sys.stdout.flush()
arrayUy_new=interpolate.griddata((mx.flatten(),my.flatten()),arrayUy.flatten() , (mx_new,my_new),method='linear')
print('Coordinate Transformation Complete!')
print('peak memory = ',getrusage(RUSAGE_SELF).ru_maxrss)
sys.stdout.flush()
return (arrayUx_new,arrayUy_new)
def RotateSimulation(cwd,time,mx,my,Ux,Uy,pos):
global RADIUSLARGE
#Shift x and y by the CM location
xCM = 0.25*(pos.loc[0,'aXU'] + pos.loc[0,'bXU'] + pos.loc[0,'aXL'] + pos.loc[0,'bXL'])
yCM = 0.25*(pos.loc[0,'aYU'] + pos.loc[0,'bYU'] + pos.loc[0,'aYL'] + pos.loc[0,'bYL'])
#Do the same for mx and my
mx -= xCM
my -= yCM
#Shift pos data by xCM and yCM
pos['aXU'] -= xCM
pos['aXL'] -= xCM
pos['bXU'] -= xCM
pos['bXL'] -= xCM
pos['aYU'] -= yCM
pos['aYL'] -= yCM
pos['bYU'] -= yCM
pos['bYL'] -= yCM
#Rotate Reference frame by swimmer 1's axis
#Calculate Theta (Rotate by -Theta)
theta_rotate = CalcLabAngle(pos)
print('theta_rotate = ',theta_rotate*180.0/np.pi)
mxy = np.array([mx.flatten(),my.flatten()])
mxy_rot = np.zeros((2,1024*1024))
#Do the same for the U field
Uxy = np.array([Ux.flatten(),Uy.flatten()])
Uxy_rot = np.zeros((2,1024*1024))
for jdx in range(1024*1024):
mxy_rot[:,jdx] = Rotate(mxy[:,jdx],theta_rotate)
Uxy_rot[:,jdx] = Rotate(Uxy[:,jdx],theta_rotate)
mx_rot = mxy_rot[0,:].reshape((1024,1024))
my_rot = mxy_rot[1,:].reshape((1024,1024))
Ux_rot = Uxy_rot[0,:].reshape((1024,1024))
Uy_rot = Uxy_rot[1,:].reshape((1024,1024))
aU_pos = np.array([pos.loc[0,'aXU'],pos.loc[0,'aYU']])
aL_pos = np.array([pos.loc[0,'aXL'],pos.loc[0,'aYL']])
bU_pos = np.array([pos.loc[0,'bXU'],pos.loc[0,'bYU']])
bL_pos = np.array([pos.loc[0,'bXL'],pos.loc[0,'bYL']])
aU_rot = Rotate(aU_pos,theta_rotate)
print('aU = ',aU_pos)
print('aU_rot = ',aU_rot)
aL_rot = Rotate(aL_pos,theta_rotate)
bU_rot = Rotate(bU_pos,theta_rotate)
bL_rot = Rotate(bL_pos,theta_rotate)
pos['aXU_rot'], pos['aYU_rot'] = aU_rot[0], aU_rot[1]
pos['aXL_rot'], pos['aYL_rot'] = aL_rot[0], aL_rot[1]
pos['bXU_rot'], pos['bYU_rot'] = bU_rot[0], bU_rot[1]
pos['bXL_rot'], pos['bYL_rot'] = bL_rot[0], bL_rot[1]
#Interpolate onto a new coordinate system
x = np.linspace(-0.025/RADIUSLARGE,0.025/RADIUSLARGE,512)
y = np.linspace(-0.025/RADIUSLARGE,0.025/RADIUSLARGE,512)
mx_stream, my_stream = np.meshgrid(x,y)
interpUx, interpUy = InterpolateToNewCoordinateSystem(x,y,mx_rot,my_rot,Ux_rot,Uy_rot)
return (mx_stream, my_stream, interpUx, interpUy, pos)
#Plot New mesh and interpolated velocity field Ux and Uy
def PlotAvgU(cwd,mx,my,Ux,Uy,pos,space):
global FIGNUM, PERIOD,minVal,maxVal,Theta,Hx,Hy
#Here, we will visualize the velocity field on the new coordinate system
nRows, nCols = 1, 1
fig, ax = plt.subplots(nrows=nRows, ncols=nCols, num=0,figsize=(6,6),dpi=200)
ax.set_title(r'Average Velocity Field',fontsize=12)
normUx,normUy = Ux/np.hypot(Ux,Uy),Uy/np.hypot(Ux,Uy)
magU = np.hypot(Ux,Uy)
#Plot Vector field with quiver
ax.quiver(mx[::space,::space],my[::space,::space],
Ux[::space,::space],Uy[::space,::space],
color='white',pivot='mid',angles='xy',scale_units='xy', scale=10,zorder=5)
#Plot magnitude with contourf
ax.contourf(mx,my,magU,cmap='viridis')
AddDiscsToPlot(ax,pos)
#print('RSMALL = ',RSMALL)
ax.axis([minVal,maxVal,minVal,maxVal])
fig.tight_layout()
#plt.show()
fig.savefig(cwd+'avgU_T{0}_Hx{1}_Hy{2}_.png'.format(Theta,np.round(float(Hx)/2.0,2),np.round(float(Hy)/2.0,1)))
fig.clf()
plt.close()
return
#Plot New mesh and interpolated velocity field Ux and Uy
def PlotParticles(mx,my,U,pos,particles,frame,cwd):
global FIGNUM, PERIOD,minVal,maxVal,Theta,Hx,Hy
#Here, we will visualize the velocity field on the new coordinate system
nRows, nCols = 1, 1
fig, ax = plt.subplots(nrows=nRows, ncols=nCols, num=1,figsize=(6,6),dpi=200)
alpha = np.linspace(0.2,1.0,particles.nTime)
for idTime in range(particles.nTime):
pointColor = (1.0-(1.0*idTime/(1.0*particles.nTime)),)*3
alphaValue = alpha[idTime]
markerSize = particles.size[idTime,0]
ax.plot(particles.x[idTime].flatten(),particles.y[idTime].flatten(),
marker='o',ms=markerSize,color=pointColor,zorder=5,alpha=1,linewidth=0)
#Plot Swimmer
AddDiscsToPlot(ax,pos)
#print('RSMALL = ',RSMALL)
ax.axis([minVal,maxVal,minVal,maxVal])
fig.tight_layout()
fig.savefig(cwd+'PartStream_T{0}_Hx{1}_Hy{2}_{3}_.png'.format(Theta,np.round(float(Hx)/2.0,2),np.round(float(Hy)/2.0,1),frame))
#plt.show()
fig.clf()
plt.close()
return
class Particles:
def __init__(self,rows,cols,minVal,maxVal,nPart,nTime):
self.nPart = nPart
print('nTime = ',nTime)
self.nTime = nTime
print('self.nTime = ',self.nTime)
xvals = np.linspace(minVal,maxVal,rows)
yvals = np.linspace(minVal,maxVal,cols)
mx, my = np.meshgrid(xvals,yvals)
self.x = np.array([mx.flatten()]*nTime).reshape((nTime,nPart))
self.y = np.array([my.flatten()]*nTime).reshape((nTime,nPart))
self.xinit = self.x.copy()
self.yinit = self.y.copy()
self.vx = np.zeros((nTime,nPart))
self.vy = np.zeros((nTime,nPart))
self.vxinit = self.vx.copy()
self.vyinit = self.vy.copy()
self.idx = np.array([[int((self.x[a,b] + maxR)/dX) for b in range(self.nPart)] for a in range(self.nTime)])
self.idy = np.array([[int((self.y[a,b] + maxR)/dX) for b in range(self.nPart)] for a in range(self.nTime)])
self.age = np.array([random.randrange(10,240) for b in range(nPart)]*nTime).reshape((nTime,nPart))
self.life = self.age.copy()
self.size = np.array([np.linspace(0.001,.1,self.nTime) for a in range(self.nPart)]).T.reshape((nTime,nPart))
self.curr_age = np.zeros((nTime,nPart))
def CalcMeshIndex(self,idTime):
self.idx[idTime] = [int((self.x[idTime,b] + maxR)/dX) for b in range(self.nPart)]
self.idy[idTime] = [int((self.y[idTime,b] + maxR)/dX) for b in range(self.nPart)]
def AssignVelocity(self,idTime,nX,avgUx,avgUy):
indices = nX*self.idy[idTime]+self.idx[idTime]
self.vx[idTime] = avgUx[indices]
self.vy[idTime] = avgUy[indices]
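# A minimal sketch (not part of the original script) of the indexing assumption
# behind CalcMeshIndex/AssignVelocity above: the flattened velocity arrays are
# taken to be C-ordered (row-major), so the grid point in column idx and row idy
# sits at flat index nX*idy + idx. The grid size here is a hypothetical example.
def _flat_index_demo():
    import numpy as np
    nX = 4
    field = np.arange(nX * nX).reshape((nX, nX))
    idx, idy = 2, 3                          # column, row
    flat = field.flatten()
    assert flat[nX * idy + idx] == field[idy, idx]
    return flat[nX * idy + idx]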
if __name__ == '__main__':
#Get AvgVel Field and Rotate Frame
#Save Vel Field as AvgUx and AvgUy
#READ ALL AVG FILES IN A SIMULATION DIRECTORY
#EXTRACT AVERAGE FIELD DATA INTO NUMPY ARRAYS
#PLOT AVERAGED FIELD DATA
#Simulation Parameters
#Extract Position Data
#Calculate # Periods
DUMP_INT = 20.0
nTime = GetPosDataLength(cwd_POS)
nPer = int(np.trunc(1.0*nTime/DUMP_INT))
#nPer = 2
#Paths to data and plots
cwd_DATA = cwd_Re
countPer = 0
for countPer in range(nPer):
if(countPer == perNumber):
AVGPlot = pathlib.Path(cwd_DATA+'AVG_%04d.csv'%countPer)
if AVGPlot.exists ():
start = t.clock()
#Get Avg Field Data
mx,my,avgW,avgP,avgUx,avgUy = GetAvgFieldData(cwd_DATA,countPer)
#Extract Position and Time Data
time = np.round(0.05 + countPer*PERIOD,2)
posData = GetPosData(cwd_POS,time,float(Theta),float(Hx),float(Hy))
#Plot Averaged Field Data
#Vorticity And Streamlines
mx,my,avgUx,avgUy,posData = RotateSimulation(cwd_PYTHON,time,mx,my,avgUx,avgUy,posData)
stend = t.clock()
diff = stend - start
print('Time to run for 1 period = %.5fs'%diff)
sys.stdout.flush()
#Visual Check of vel field's data (any idx)
PlotAvgU(cwd_FIGS,mx,my,avgUx,avgUy,posData,6)
#Now that we have the avg velocity field, we can calculate particle trajectories
#Let's start with 10
# Initialize 10 random coordinates
# Each coordinate will have a lifetime from 60 -> 240 frames
# For each, calculate the new position based on the velocity field (do this for twenty timesteps)
# The velocity field does not evolve
# Create array of points (use interpolate.griddata to find velocities)
# pos += dt*velocity_interp
# Plot scatter which decreases in opacity and point size for timesteps going backward
# Advance time for each new frame
#Initialize Uniform Distribution of points. The structure should be a 2D ndarray
#Choose points in range (-7.5,7.5) for both x and y
#Flatten avg Velocity field
#print(avgU[1,281,248])
avgU = np.array([[avgUx],[avgUy]])
avgUx = avgUx.flatten()
avgUy = avgUy.flatten()
magU = np.hypot(avgUx,avgUy)
while np.amax(magU)*timestep > 0.75*dX:
timestep *= 0.95
print('timestep = ',timestep)
#print('dX = ',dX)
#print('max dX = ',timestep*np.amax(magU))
assert 0.75*dX >= np.amax(magU)*timestep
particles = Particles(rows,cols,minVal,maxVal,nPart,nTrail)
#Initialize Particles
#Find velocity by index value (no interpolation)
for idTime in range(nTrail):
#Calculate Mesh Index for idTime
particles.CalcMeshIndex(idTime)
#Assign velocity by index (no interp)
particles.AssignVelocity(idTime,NX,avgUx,avgUy)
#Update position idTime+1
if idTime < nTrail - 1:
changeX = timestep*particles.vx[idTime]
changeY = timestep*particles.vy[idTime]
particles.x[idTime+1:nTime] += changeX
particles.y[idTime+1:nTime] += changeY
#Increase age of particles
particles.curr_age[:idTime+1] -= 1
#Save Initial particle stream pos and vel
particles.xinit = particles.x[0,:].copy()#particles.x.copy()
particles.yinit = particles.y[0,:].copy()#particles.y.copy()
particles.vxinit = particles.vx[0,:].copy()#particles.vx.copy()
particles.vyinit = particles.vy[0,:].copy()#particles.vy.copy()
'''
print('B4')
print('life[100] = ',particles.age[:,100])
print('curr_age[100] = ',particles.curr_age[:,100])
print('x[100] = ',particles.x[:,100])
print('y[100] = ',particles.y[:,100])
print('vx[100] = ',particles.vx[:,100])
print('vy[100] = ',particles.vy[:,100])
'''
#Loop over # of frames
for idxFrame in range(2*nFrames):
#Check Age
particles.x = np.where(particles.curr_age >= particles.age, particles.xinit, particles.x)
particles.y = np.where(particles.curr_age >= particles.age, particles.yinit, particles.y)
particles.vx = np.where(particles.curr_age >= particles.age, particles.vxinit, particles.vx)
particles.vy = np.where(particles.curr_age >= particles.age, particles.vyinit, particles.vy)
particles.age = np.where(particles.curr_age >= particles.age, nFrames, particles.age)
particles.curr_age = np.where(particles.curr_age >= particles.life, 0, particles.curr_age)
particles.life = particles.age.copy()
#Plot Particle Stream
if idxFrame >= nFrames:
PlotParticles(mx,my,avgU,posData,particles,idxFrame-nFrames,cwd_FIGS)
#Increase particle age
particles.curr_age += 1
#Roll positions and velocities back 1. Will need to calculate last position each particle
particles.x = np.roll(particles.x,-1,axis=0)
particles.y = np.roll(particles.y,-1,axis=0)
particles.vx = np.roll(particles.vx,-1,axis=0)
particles.vy = np.roll(particles.vy,-1,axis=0)
#Change Position of last value by using velocity of 2nd to last
changeX = timestep*particles.vx[-2]
#print('vy[-2,1] = ',particles.vy[-2,1])
changeY = timestep*particles.vy[-2]
#print('changeY = ',changeY[1])
particles.x[-1] = particles.x[-2] + changeX
particles.y[-1] = particles.y[-2] + changeY
#Update Mesh Index
particles.CalcMeshIndex(-1)
#Update Velocity of first particle in trail
particles.AssignVelocity(-1,NX,avgUx,avgUy)
if idxFrame %60 == 0:
print('Frame {0} is complete'.format(idxFrame))
sys.stdout.flush()
os.chdir(cwd_FIGS)
strMovie = "ffmpeg -r 60 -i PartStream_T{0}_Hx{1}_Hy{2}_%d_.png -vcodec libx264 -pix_fmt yuv420p -y PartMov_T{0}_Hx{1}_Hy{2}_.mp4".format(Theta,np.round(float(Hx)/2.0,2),np.round(float(Hy)/2.0,1))
os.system(strMovie)
os.system("rm -rf PartStream_T{0}_Hx{1}_Hy{2}_*".format(Theta,np.round(float(Hx)/2.0,2),np.round(float(Hy)/2.0,1)))
os.chdir(cwd_PYTHON)
print('Movie T{0}: Hx{1}: Hy{2} is complete'.format(Theta,Hx,Hy))
|
def is_even(number):
# Returns 1 only for positive even numbers; zero and negatives return 0.
if number % 2 == 0 and number > 0:
return 1
else:
return 0
def print_even_numbers(n):
counter = 0
iteration = 0
while True:
if counter < n:
if is_even(iteration):
print(iteration, end='')
counter += 1
if counter < n:
print('', end=', ')
else:
break
iteration += 1
a = int(input('Enter a: '))
print_even_numbers(a)
|
# Exercise 9.5 - Book
with open('txt/pares.txt', 'r') as pares:
valores_pares = pares.readlines()
ultimo = len(valores_pares) - 1
primeiro = -1
for pos in range(ultimo, primeiro, -1):
print(f'{valores_pares[pos]}', end='')
|
# -*- encoding:utf-8 -*-
# __author__=='Gan'
# Write a program to find the nth super ugly number.
# Super ugly numbers are positive numbers whose all prime factors are in the given prime list primes of size k.
# For example, [1, 2, 4, 7, 8, 13, 14, 16, 19, 26, 28, 32] is the sequence of the first 12 super ugly
# numbers given primes = [2, 7, 13, 19] of size 4.
# Note:
# (1) 1 is a super ugly number for any given primes.
# (2) The given numbers in primes are in ascending order.
# (3) 0 < k ≤ 100, 0 < n ≤ 10^6, 0 < primes[i] < 1000.
# (4) The nth super ugly number is guaranteed to fit in a 32-bit signed integer.
# Credits:
# Special thanks to @dietpepsi for adding this problem and creating all test cases.
# 83 / 83 test cases passed.
# Status: Accepted
# Runtime: 1035 ms
# Your runtime beats 41.02 % of python submissions.
class Solution(object):
def nthSuperUglyNumber(self, n, primes):
"""
:type n: int
:type primes: List[int]
:rtype: int
"""
if n <= 0:
return 0
ans = [1]
primes_map = {p: 0 for p in primes}
for i in range(1, n):
ans.append(min(x * ans[primes_map[x]] for x in primes_map))
for i in primes:
if ans[-1] == i * ans[primes_map[i]]:
primes_map[i] += 1
return ans[n - 1]
# 83 / 83 test cases passed.
# Status: Accepted
# Runtime: 239 ms
# Your runtime beats 98.83 % of python submissions.
import heapq
class Solution(object):
def nthSuperUglyNumber(self, n, primes):
"""
:type n: int
:type primes: List[int]
:rtype: int
"""
ans = [1] * n
idx = [0] * n # Handle the same value that in answer.
p_index = [0] * len(primes)
ugly_heap = []
for i, k in enumerate(primes):
heapq.heappush(ugly_heap, (k, i))
for i in range(1, n):
top, k = heapq.heappop(ugly_heap)
ans[i] = top
idx[i] = k
p_index[k] += 1
# When id[p_index[k]] > k that means the same value has been pushed to heap.
# For example, when pushing 2 * 7 that 7 * 2 has been pushed, so skip this value.
while idx[p_index[k]] > k:
p_index[k] += 1
heapq.heappush(ugly_heap, (primes[k] * ans[p_index[k]], k))
return ans[-1]
if __name__ == '__main__':
print(Solution().nthSuperUglyNumber(
50, [2, 7, 13, 19]
))
|
import tkinter
import time
import threading
import math
import sys
class Vertex:
def __init__(self, name, x, y):
self.name = name
self.x = x
self.y = y
self.edges = {}
self.visited = False
self.dist = sys.maxsize
self.parent = None
self.dist_text = None
def connect(self, name, dist):
self.edges[name] = dist
class Graph:
rad = 10
def __init__(self, length, width):
self.width = width
self.length = length
self.window = tkinter.Tk()
self.canvas = tkinter.Canvas(self.window, width=self.width, height=self.length)
self.canvas.pack()
self.vertices = {}
tkinter.Button(self.window, text="Solve", command=self.solve).pack()
def add_vertex(self, name, x, y):
vertex = Vertex(name, x, y)
self.vertices[name] = vertex
self.draw_vertex(vertex)
def solve(self):
thread = ThreadedTask(self)
thread.start()
def draw_vertex(self, vertex, color="green"):
if color == "green":
if vertex.visited:
color = "red"
self.circle(vertex.x, vertex.y, Graph.rad, color)
self.canvas.create_text(vertex.x, vertex.y, font="Purisa", text=vertex.name)
if vertex.dist != sys.maxsize:
if vertex.dist_text is not None:
self.canvas.delete(vertex.dist_text)
vertex.dist_text = self.canvas.create_text(vertex.x + Graph.rad, vertex.y - Graph.rad * 2, font="Purisa",
text=str(vertex.dist))
def circle(self, x, y, r, color):
return self.canvas.create_oval(x - r, y - r, x + r, y + r, fill=color)
def connect_vertices(self, name_a, name_b):
vertex_a = self.vertices[name_a]
vertex_b = self.vertices[name_b]
distance = int(math.sqrt((vertex_a.x - vertex_b.x) ** 2 + (vertex_a.y - vertex_b.y) ** 2))
vertex_a.connect(vertex_b.name, distance)
vertex_b.connect(vertex_a.name, distance)
self.canvas.create_line(vertex_a.x, vertex_a.y, vertex_b.x, vertex_b.y)
# self.canvas.create_text((vertex_a.x + vertex_b.x) / 2, (vertex_a.y + vertex_b.y) / 2, font="Purisa",
# text=str(distance))
self.draw_vertex(vertex_a)
self.draw_vertex(vertex_b)
def start(self):
self.window.mainloop()
def min_distance(self):
min_name = ""
min_dist = sys.maxsize
for name, vertex in self.vertices.items():
if not vertex.visited:
if vertex.dist < min_dist:
min_name = name
min_dist = vertex.dist
return self.vertices[min_name]
def dijkstra(self, name_src, name_dest):
src_vertex = self.vertices[name_src]
src_vertex.dist = 0
for count in range(0, len(self.vertices) - 1):
vertex = self.min_distance()
vertex.visited = True
time.sleep(.4)
self.draw_vertex(vertex, "orange")
for name, dist in vertex.edges.items():
n_vertex = self.vertices[name]
if not n_vertex.visited and n_vertex.dist > dist + vertex.dist:
n_vertex.dist = dist + vertex.dist
n_vertex.parent = vertex
self.draw_vertex(n_vertex, "yellow")
time.sleep(1)
self.draw_vertex(n_vertex)
vertex.visiting = False
self.draw_vertex(vertex)
vertex = self.vertices[name_dest]
while vertex.name != name_src:
self.draw_vertex(vertex, "blue")
vertex = vertex.parent
self.draw_vertex(vertex, "blue")
class ThreadedTask(threading.Thread):
def __init__(self, graph):
threading.Thread.__init__(self)
self.graph = graph
def run(self):
self.graph.dijkstra("a", "z")
g = Graph(500, 800)
g.add_vertex("a", 120, 50)
g.add_vertex("b", 100, 300)
g.add_vertex("c", 200, 100)
g.add_vertex("d", 300, 50)
g.add_vertex("e", 400, 40)
g.add_vertex("f", 700, 100)
g.add_vertex("h", 600, 400)
g.add_vertex("i", 500, 420)
g.add_vertex("j", 450, 320)
g.add_vertex("k", 550, 220)
g.add_vertex("l", 220, 220)
g.add_vertex("m", 320, 100)
g.add_vertex("z", 750, 440)
g.connect_vertices("a", "b")
g.connect_vertices("a", "d")
g.connect_vertices("c", "b")
g.connect_vertices("a", "c")
g.connect_vertices("c", "d")
g.connect_vertices("b", "i")
g.connect_vertices("i", "h")
g.connect_vertices("c", "j")
g.connect_vertices("c", "k")
g.connect_vertices("b", "j")
g.connect_vertices("j", "k")
g.connect_vertices("j", "h")
g.connect_vertices("k", "h")
g.connect_vertices("k", "f")
g.connect_vertices("k", "z")
g.connect_vertices("e", "k")
g.connect_vertices("d", "e")
g.connect_vertices("e", "f")
g.connect_vertices("f", "z")
g.connect_vertices("h", "z")
g.connect_vertices("e", "j")
g.connect_vertices("a", "l")
g.connect_vertices("l", "j")
g.connect_vertices("a", "m")
g.connect_vertices("m", "j")
g.start()
|
#!/usr/bin/env python
import Keylogger
print("Please enter your gmail's credentials here so that log email can be sent.")
print("")
email = input("Email Address : ")
password = input("Email's password : ")
print("Enter the number of seconds in which you want the email to be sent-")
seconds = input("seconds : ")
# time_interval = input("Enter the Time Interval After Which You Need the Records : ")
my_keylogger = Keylogger.Keylogger(seconds, email, password)
my_keylogger.start()
|
"""Get EXIF data from a directory of photos"""
from pyexiv2 import Image # type: ignore
import pyexiv2
import os
import re
pyexiv2.set_log_level(4)
def scan_tree(directory: str):
"""Recursively yield DirEntry objects for given directory.
:param directory: A string with the directory to scan for photos.
:returns: Yields os.DirEntry objects for the files in the directory tree."""
for entry in os.scandir(directory):
if entry.is_dir(follow_symlinks=False):
yield from scan_tree(entry.path)
else:
yield entry
def get_photos(this_directory: str) -> list:
"""Get photos from a directory.
:param this_directory: a directory containing photos
:returns: A list containing photo files with complete path
"""
directory_files = scan_tree(this_directory)  # generator of os.DirEntry objects
regex = re.compile('cr2$|jpg$|dng$') # needs to be generalized for all possible photo extensions
photos: list = []
for file in directory_files:
if regex.search(file.name.lower()) and file.is_file():
photos.append(file.path)
return photos
def get_exif(photo_list: list) -> list:
"""Obtain exif data for each photo from the list.
:param photo_list: A list of photos from a directory
:returns: A list of exif dictionaries from photos
"""
exif_list = []
for image in photo_list:
try:
this_image = Image(image)
except RuntimeError: # cannot open dng
print(f"Could not get exif data for {image}")
raise
try:
this_image_exif = this_image.read_exif()
except UnicodeDecodeError:
print(f"Unicode error with {image}. Trying a different encoding")
this_image_exif = this_image.read_exif(encoding='iso-8859-1')
exif_list.append(this_image_exif)
return exif_list
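# A minimal usage sketch chaining the helpers above; the directory path is a
# hypothetical placeholder, not part of the original module.
if __name__ == '__main__':
    photo_dir = '/path/to/photos'            # assumption: replace with a real directory
    photos = get_photos(photo_dir)
    exif_records = get_exif(photos)
    print(f"Read EXIF data from {len(exif_records)} of {len(photos)} photos")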
|
from tkinter import *
root = Tk()
display = Entry(root)
display.grid(sticky=N,columnspan=50)
def getvarriables(number):
current = display.get()
display.delete(0, END)
display.insert(0, (current) + (number))
def cleardata():
display.delete(0, END)
def add():
first_num = display.get()
global f_num
global math
math = "addition"
f_num = int(first_num)
display.delete(0, END)
def sub():
first_num = display.get()
global f_num
global math
math = "substraction"
f_num = int(first_num)
display.delete(0, END)
def mult():
first_num = display.get()
global f_num
global math
math = "multiplication"
f_num = int(first_num)
display.delete(0, END)
def div():
first_num = display.get()
global f_num
global math
math = "division"
f_num = int(first_num)
display.delete(0, END)
def equals():
second_num = display.get()
display.delete(0, END)
if math == "addition":
display.insert(0, f_num + int(second_num))
elif math == "substraction":
display.insert(0, f_num - int(second_num))
elif math == "multiplication":
display.insert(0, f_num * int(second_num))
elif math == "division":
display.insert(0, f_num / int(second_num))
label_1 = Button(root, text="1", command=lambda: getvarriables("1"), padx=40,pady=30)
label_2 = Button(root, text="2", command=lambda: getvarriables("2"), padx=40,pady=30)
label_3 = Button(root, text="3", command=lambda: getvarriables("3"), padx=40,pady=30)
label_4 = Button(root, text="4", command=lambda: getvarriables("4"), padx=40,pady=30)
label_5 = Button(root, text="5", command=lambda: getvarriables("5"), padx=40,pady=30)
label_6 = Button(root, text="6", command=lambda: getvarriables("6"), padx=40,pady=30)
label_7 = Button(root, text="7", command=lambda: getvarriables("7"), padx=40,pady=30)
label_8 = Button(root, text="8", command=lambda: getvarriables("8"), padx=40,pady=30)
label_9 = Button(root, text="9", command=lambda: getvarriables("9"), padx=40,pady=30)
label_0 = Button(root, text="0", command=lambda: getvarriables("0"), padx=40,pady=30)
label_add = Button(root, text="+", command=add, padx=40,pady=30)
label_sub = Button(root, text="-", command=sub, padx=40,pady=30)
label_mul = Button(root, text="*", command=mult, padx=40,pady=30)
label_div = Button(root, text="/", command=div, padx=40,pady=30)
label_clr = Button(root, text="clr", command=cleardata, padx=40,pady=30)
label_eqls = Button(root, text="=", command=equals, padx=40,pady=30)
label_1.grid(row=1, column=0, columnspan=1)
label_2.grid(row=1, column=1, columnspan=1)
label_3.grid(row=1, column=2, columnspan=1)
label_4.grid(row=2, column=0, columnspan=1)
label_5.grid(row=2, column=1, columnspan=1)
label_6.grid(row=2, column=2, columnspan=1)
label_7.grid(row=3, column=0, columnspan=1)
label_8.grid(row=3, column=1, columnspan=1)
label_9.grid(row=3, column=2, columnspan=1)
label_0.grid(row=4, column=1, columnspan=1)
label_add.grid(row=1, column=3, columnspan=1)
label_sub.grid(row=2, column=3, columnspan=1)
label_mul.grid(row=3, column=3, columnspan=1)
label_div.grid(row=4, column=3, columnspan=1)
label_clr.grid(row=4, column=0, columnspan=1)
label_eqls.grid(row=4, column=2, columnspan=1)
root.mainloop()
|
import boto3
from pprint import pprint
BUCKET = "mygirlfriend"
KEY_SOURCE = "1.jpg"
KEY_TARGET = "2.jpg"
def compare_faces(bucket, key, bucket_target, key_target, threshold=80, region="ap-southeast-2"):
rekognition = boto3.client("rekognition", region)
response = rekognition.compare_faces(
SourceImage={
"S3Object": {
"Bucket": 'mygirlfriend',
"Name": '1.jpg',
}
},
TargetImage={
"S3Object": {
"Bucket": 'mygirlfriend',
"Name": '2.jpg',
}
},
SimilarityThreshold=threshold,
)
return response['SourceImageFace'], response['FaceMatches']
source_face, matches = compare_faces(BUCKET, KEY_SOURCE, BUCKET, KEY_TARGET)
# the main source face
pprint("Source Face ({Confidence}%)".format(**source_face))
# one match for each target face
for match in matches:
print("Target Face ({Confidence}%)".format(**match['Face']))
print(" Similarity : {}%".format(match['Similarity']))
|
import multiprocessing as mp
import queue
import numpy as np
import time
import random
def main():
trainbatch_q = mp.Queue(10)
batchperq = 50
event = mp.Event()
tl1 = mp.Process(target=proc, args=( trainbatch_q, 20, batchperq, event))
print("Got here")
tl1.start()
time.sleep(3)
event.set()
tl1.join()
print("Never printed..")
def proc(batch_q, batch_size, batchperentry, the_event):
print("enter")
nrow = 100000
i0 = 0
to_q = []
while i0 < nrow:
rowend = min(i0 + batch_size,nrow)
somerows = np.random.randint(0, 5, (rowend-i0, 2))  # np.random.randint accepts an output shape; random.randint does not
to_q.append(somerows.tolist())
if len(to_q) == batchperentry:
print("adding..", i0, len(to_q))
while not the_event.is_set():
try:
batch_q.put(to_q, block=False)
to_q = []
break
except queue.Full:
time.sleep(1)
i0 += batch_size
print("proc finishes")
return
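# Note on the "Never printed.." line below: per the multiprocessing docs, a child
# that has put items on a Queue will not terminate until the queue's feeder thread
# has flushed them, so joining it without draining the queue can deadlock. One
# possible fix (a sketch, not part of the original snippet) is to consume the
# queue while waiting for the child:
def drain_and_join(q, proc):
    while proc.is_alive() or not q.empty():
        try:
            q.get(timeout=0.1)
        except queue.Empty:
            pass
    proc.join()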
if __name__ == "__main__":
main()
|
"""
An image classifier using a tf.keras.Sequential model and load data using tf.keras.preprocessing.image.ImageDataGenerator
"""
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
path_to_zip = tf.keras.utils.get_file('cats_and_dogs.zip', origin=_URL, extract=True)
PATH = os.path.join(os.path.dirname(path_to_zip), 'cats_and_dogs_filtered')
train_dir = os.path.join(PATH, 'train')
validation_dir = os.path.join(PATH, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats')
train_dogs_dir = os.path.join(train_dir, 'dogs')
validation_cats_dir = os.path.join(validation_dir, 'cats')
validation_dogs_dir = os.path.join(validation_dir, 'dogs')
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val
batch_size = 128
epochs = 15
IMG_HEIGHT = 150
IMG_WIDTH = 150
# ===========================================================
try:
model_new = tf.keras.models.load_model("model_new.h5")
except Exception:
# No saved model (or loading failed): build, train, and save a new one.
image_gen_train = ImageDataGenerator(
rescale=1./255,
rotation_range=45,
width_shift_range=.15,
height_shift_range=.15,
horizontal_flip=True,
zoom_range=0.5
)
train_data_gen = image_gen_train.flow_from_directory(batch_size=batch_size,
directory=train_dir,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='binary')
image_gen_val = ImageDataGenerator(rescale=1./255)
val_data_gen = image_gen_val.flow_from_directory(batch_size=batch_size,
directory=validation_dir,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='binary')
model_new = Sequential([
Conv2D(16, 3, padding='same', activation='relu',
input_shape=(IMG_HEIGHT, IMG_WIDTH ,3)),
MaxPooling2D(),
Dropout(0.2),
Conv2D(32, 3, padding='same', activation='relu'),
MaxPooling2D(),
Conv2D(64, 3, padding='same', activation='relu'),
MaxPooling2D(),
Dropout(0.2),
Flatten(),
Dense(512, activation='relu'),
Dense(1)
])
model_new.compile(optimizer='adam',
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
model_new.summary()
history = model_new.fit(
train_data_gen,
steps_per_epoch=total_train // batch_size,
epochs=epochs,
validation_data=val_data_gen,
validation_steps=total_val // batch_size
)
model_new.save("model_new.h5")
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# ================================================================
img1 = image.load_img('images/ct.jpg', target_size=(IMG_HEIGHT, IMG_WIDTH))
img = image.img_to_array(img1)
img = img/255
img = np.expand_dims(img, axis=0)
prediction = model_new.predict(img, batch_size=None, steps=1)  # raw logit for the 'dog' class
print(prediction)
# The model ends in Dense(1) trained with from_logits=True, so apply a sigmoid
# to turn the logit into a probability before thresholding at 0.5.
dog_prob = tf.math.sigmoid(prediction[0, 0]).numpy()
if dog_prob > 0.5:
value = 'Dog :%1.2f' % dog_prob
plt.text(20, 62, value, color='red', fontsize=18, bbox=dict(facecolor='white', alpha=0.8))
else:
value = 'Cat :%1.2f' % (1.0 - dog_prob)
plt.text(20, 62, value, color='red', fontsize=18, bbox=dict(facecolor='white', alpha=0.8))
plt.imshow(img1)
plt.show()
|
class Menu:
def __init__(self, title="Menu", options=["Option 1", "Option 2"]):
self.options = options
self.result = -1
self.title = title
def __str__(self):
result = ""
self.options.append(self.title)
maxLength = int(len(max(self.options, key=len)) + len(str(len(self.options))))
dashes = int((maxLength - (len(self.title) - 2)) / 2)
self.options.remove(self.title)
for dash in range(dashes):
result += "-"
result += " " + str(self.title) + " "
for dash in range(dashes):
result += "-"
result += "\n"
for index, option in enumerate(self.options):
result += str(index + 1) + ": " + option + "\n"
for dash in range((dashes * 2) + len(self.title) + 2):
result += "-"
result += "\n"
return result
def get_input(self):
print(self.__str__())
self.result = int(input("Enter the number of your choice: ")) - 1
def get_result(self):
if self.result < 0 or self.result >= len(self.options):
print("Invalid choice: " + str(self.result))
else:
return self.options[self.result]
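# A short, hypothetical usage sketch (not part of the original module) showing
# how the Menu class above is meant to be driven:
if __name__ == '__main__':
    menu = Menu("Main Menu", ["Start game", "Load game", "Quit"])
    menu.get_input()                 # prints the menu and reads a 1-based choice
    print("You picked:", menu.get_result())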
|
from dynaconf import Dynaconf
settings = Dynaconf(
load_dotenv=True,
environments=True,
settings_files=['settings.toml'],
{%- if cookiecutter.enable_vault_loader == 'y' %}
loaders=['dynaconf.loaders.vault_loader'],
{%- endif %}
)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 25 19:36:36 2017
@author: anders & torgeir
"""
'''
620031587
Net-Centric Computing Assignment
Part A - RSA Encryption
'''
import random
from datetime import datetime
'''
Euclid's algorithm for determining the greatest common divisor
Use iteration to make it faster for larger integers
'''
def gcd(a, b):
while b != 0:
a, b = b, a % b
return a
'''
Euclid's extended algorithm for finding the multiplicative inverse of two numbers
'''
def multiplicative_inverse(e, phi):
d = 0
x1 = 0
x2 = 1
y1 = 1
temp_phi = phi
while e > 0:
temp1 = temp_phi/e
temp2 = temp_phi - temp1 * e
temp_phi = e
e = temp2
x = x2- temp1* x1
y = d - temp1 * y1
x2 = x1
x1 = x
d = y1
y1 = y
if temp_phi == 1:
return d + phi
'''
Tests to see if a number is prime.
'''
def is_prime(num):
if num == 2:
return True
if num < 2 or num % 2 == 0:
return False
for n in xrange(3, int(num**0.5)+2, 2):
if num % n == 0:
return False
return True
def generate_keypair(p, q):
if not (is_prime(p) and is_prime(q)):
raise ValueError('Both numbers must be prime.')
elif p == q:
raise ValueError('p and q cannot be equal')
#n = pq
n = p * q
#Phi is the totient of n
phi = (p-1) * (q-1)
#Choose an integer e such that e and phi(n) are coprime
e = random.randrange(1, phi)
#Use Euclid's Algorithm to verify that e and phi(n) are coprime
g = gcd(e, phi)
while g != 1:
e = random.randrange(1, phi)
g = gcd(e, phi)
#Use Extended Euclid's Algorithm to generate the private key
d = multiplicative_inverse(e, phi)
#Return public and private keypair
#Public key is (e, n) and private key is (d, n)
return ((e, n), (d, n))
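#A small worked example (not part of the original assignment code) of the key
#generation steps above, with tiny primes for readability:
# p = 5, q = 11 -> n = 55, phi = (5-1)*(11-1) = 40
# pick e = 7 (gcd(7, 40) = 1), then d = 23 since 7*23 = 161 = 4*40 + 1
# public key (7, 55), private key (23, 55)
# encrypt m = 2: 2**7 mod 55 = 128 mod 55 = 18; decrypt: 18**23 mod 55 = 2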
def MonPro(A, B, n):
a_bin = bin(A)[2:].zfill(len(bin(n)[2:]))
u = 0
for a_bit in a_bin[::-1]:
if a_bit == '1':
u += B
if (u % 2) == 1:
u += n
u = u/2
if u > n:
return u-n
return u
def BinExp(message, e, n):
k = 0
ebin = str(bin(e))[2:]
if len(ebin) == 1:
C = message
return C
for k in range (len(ebin)-1, -1, -1):
if k == len(ebin)-1:
if ebin[k-1] == '1':  # compare against the bit character, not the int 1
C = message
else:
C = 1
for i in range(k-2, -1, -1):
if ebin[i] == '1':
C = C * message % n
else:
C = C * C % n
return C
def ModInverse(invvar, j):
y = 1
for i in range (2, j+1):
if (2 ** (i-1)) < ((invvar*y) % 2**i):
y+=2**(i-1)
else:
y=y
return y
def BinSplit(n):
binsend = []
binnum = str(bin(n))[2:]
n_place = 0
n_count = len(binnum)-1
while n_place == 0:
if binnum[n_count] == '1':  # find the lowest set bit by comparing the character
n_place = n_count
else:
n_count -= 1
binodd = binnum[:n_place]
bineven = binnum[n_place:]
binsend.append(len(bineven))
binsend.append(int(binodd,2))
return binsend
def extended_gcd(a,b):
out = []
t = 1; oldt = 0
r = b; oldr = a
while r != 0:
quotient = oldr / r
(oldr, r) = (r, oldr - quotient*r)
(oldt, t) = (t, oldt - quotient*t)
out.append(oldr); out.append(oldt); out.append(r); out.append(t)
return out
def ModExp(message, e, n):
r = 2 ** len(bin(n)[2:]) # r = (r mod n) + n
n_merket = -extended_gcd(r,n)[1]
#NOTE! "r mod n" and "r*r mod n" is given in the exercise
M_strek = MonPro(message,(r*r) % n,n)
x_strek = r % n
for i in bin(e)[2:][::-1]:
if i == '1':
x_strek = MonPro(M_strek,x_strek,n)
M_strek = MonPro(M_strek,M_strek,n)
return MonPro(x_strek,1,n)
def torge_crypt(pk, M):
binret=[]
#Unpack the key into it's components
e, n = pk
if n % 2 == 0:
binret=BinSplit(n)
j = binret[0]
q = binret[1]
x1 = ModExp(M, e, q)
x2val = 2**j
x2_1 = M % x2val
x2_2 = e % 2**(j-1)
x2 = BinExp(x2_1, x2_2, x2val)
q_inv = ModInverse(q,j)
y = (x2 - x1)*q_inv % x2val
x = x1 + q*y
return x
else:
return ModExp(M, e, n)
if __name__ == '__main__':
'''
Detect if the script is being run directly by the user
'''
print "RSA Encrypter/ Decrypter"
p = int((raw_input("Enter a prime number (17, 19, 23, etc): ")).replace(",",""))
q = int((raw_input("Enter another prime number (Not one you entered above): ")).replace(",",""))
# p = 43
# q = 17
# message = 19
print "Generating your public/private keypairs now . . ."
public, private = generate_keypair(p, q)
print "Your public key is ", public ," and your private key is ", private
print "Enter a number to encrypt with your private key: "
print_string = "Needs to be less then " + str(private[1])+": "
message_str = raw_input(print_string)
message = int(message_str)
# private = (5,119)
# public = (77,119)
print "Message is:", message
print "encrypting message with private key", private, ". . ."
starttime = datetime.now()
encrypted_msg = torge_crypt(private, message)
print "Kryptert:",encrypted_msg
print "Decrypting message with public key ", public ," . . ."
decrypted_msg = torge_crypt(public, encrypted_msg)
print "Dekryptert", decrypted_msg
print "Tid:", datetime.now() - starttime
print ""
print "Fasit"
print ""
# Fasit_encrypted_msg = (message ** private[0]) % private[1]
# print Fasit_encrypted_msg
# Fasit_decrypted_msg = (Fasit_encrypted_msg ** public[0]) % public[1]
# print Fasit_decrypted_msg
#
# print ""
# if encrypted_msg == Fasit_encrypted_msg and decrypted_msg == Fasit_decrypted_msg:
# print "Woohooo, all pass!!"
# else:
# print "Buuhuu, noe gikk galt"
endtime = datetime.now()
print "Time:", endtime-starttime
print ""
print "Fasit"
print ""
Fasit_encrypted_msg = (message ** private[0]) % private[1]
print Fasit_encrypted_msg
Fasit_decrypted_msg = (Fasit_encrypted_msg ** public[0]) % public[1]
print Fasit_decrypted_msg
print ""
if encrypted_msg == Fasit_encrypted_msg and decrypted_msg == Fasit_decrypted_msg:
print "Woohooo, all pass!!"
else:
print "Buuhuu, noe gikk galt"
|
default_app_config = 'eshop_products_category.apps.EshopProductsCategoryConfig'
|
from __future__ import print_function
import base64
import binascii
import json
import flask
import six
from six.moves import cPickle as pickle
from six.moves import urllib, zip
from smqtk.algorithms import (
get_classifier_impls,
get_descriptor_generator_impls,
SupervisedClassifier
)
from smqtk.algorithms.classifier import (
ClassifierCollection,
)
from smqtk.exceptions import MissingLabelError
from smqtk.iqr import IqrSession
from smqtk.representation import (
ClassificationElementFactory,
DescriptorElementFactory,
)
from smqtk.representation.data_element.memory_element import DataMemoryElement
import smqtk.utils.plugin
from smqtk.utils import prob_utils
from smqtk.utils.web import make_response_json
import smqtk.web
# Get expected JSON decode exception
# noinspection PyProtectedMember
if hasattr(flask.json._json, 'JSONDecodeError'):
# noinspection PyProtectedMember
JSON_DECODE_EXCEPTION = getattr(flask.json._json, 'JSONDecodeError')
else:
# Exception thrown from ``json`` module.
if six.PY2:
JSON_DECODE_EXCEPTION = ValueError
else:
JSON_DECODE_EXCEPTION = json.JSONDecodeError
class SmqtkClassifierService (smqtk.web.SmqtkWebApp):
"""
Headless web-app providing a RESTful API for classifying new data against
a set of statically and dynamically loaded classifier models.
The focus of this service is an endpoint where the user can send the
base64-encoded data (with content type) they wish to be classified and get
back the classification results of all loaded classifiers applied to the
description of that data. Data for classification sent to this service is
expected to be in
Saved IQR session state bytes/files may be POST'ed to an endpoint with a
descriptive label to add to the suite of classifiers that are run for
user-provided data. The supervised classifier implementation that is
trained from this state is part of the server configuration.
Configuration Notes
-------------------
* The configured classifiers must all handle the descriptors output by the
descriptor generator algorithm. IQR states loaded into the server must
come from a service that also used the same descriptor generation
algorithm. Otherwise the classification of new data will not make sense
given the configured models as well as exceptions may occur due to
descriptor dimensionality issues.
* The classifier configuration provided for input IQR states should not
have model persistence parameters specified since these classifiers will
be ephemeral. If persistence parameters *are* specified, then subsequent
IQR-state-based classifier models will clobber each other, causing
erroneously labeled duplicate results.
"""
CONFIG_ENABLE_CLASSIFIER_REMOVAL = "enable_classifier_removal"
CONFIG_CLASSIFIER_COLLECTION = "classifier_collection"
CONFIG_CLASSIFICATION_FACTORY = "classification_factory"
CONFIG_DESCRIPTOR_GENERATOR = "descriptor_generator"
CONFIG_DESCRIPTOR_FACTORY = "descriptor_factory"
CONFIG_IMMUTABLE_LABELS = "immutable_labels"
CONFIG_IQR_CLASSIFIER = "iqr_state_classifier_config"
DEFAULT_IQR_STATE_CLASSIFIER_KEY = '__default__'
@classmethod
def is_usable(cls):
return True
@classmethod
def get_default_config(cls):
c = super(SmqtkClassifierService, cls).get_default_config()
c[cls.CONFIG_ENABLE_CLASSIFIER_REMOVAL] = False
# Static classifier configurations
c[cls.CONFIG_CLASSIFIER_COLLECTION] = \
ClassifierCollection.get_default_config()
# Classification element factory for new classification results.
c[cls.CONFIG_CLASSIFICATION_FACTORY] = \
ClassificationElementFactory.get_default_config()
# Descriptor generator for new content
c[cls.CONFIG_DESCRIPTOR_GENERATOR] = smqtk.utils.plugin.make_config(
get_descriptor_generator_impls()
)
# Descriptor factory for new content descriptors
c[cls.CONFIG_DESCRIPTOR_FACTORY] = \
DescriptorElementFactory.get_default_config()
# from-IQR-state *supervised* classifier configuration
c[cls.CONFIG_IQR_CLASSIFIER] = smqtk.utils.plugin.make_config(
get_classifier_impls(
sub_interface=SupervisedClassifier
)
)
c[cls.CONFIG_IMMUTABLE_LABELS] = []
return c
def __init__(self, json_config):
super(SmqtkClassifierService, self).__init__(json_config)
self.enable_classifier_removal = \
bool(json_config[self.CONFIG_ENABLE_CLASSIFIER_REMOVAL])
self.immutable_labels = set(json_config[self.CONFIG_IMMUTABLE_LABELS])
# Convert configuration into SMQTK plugin instances.
# - Static classifier configurations.
# - Skip the example config key
# - Classification element factory
# - Descriptor generator
# - Descriptor element factory
# - from-IQR-state classifier configuration
# - There must at least be the default key defined for when no
# specific classifier type is specified at state POST.
# Classifier collection + factory
self.classification_factory = \
ClassificationElementFactory.from_config(
json_config[self.CONFIG_CLASSIFICATION_FACTORY]
)
self.classifier_collection = ClassifierCollection.from_config(
json_config[self.CONFIG_CLASSIFIER_COLLECTION]
)
# Descriptor generator + factory
self.descriptor_factory = DescriptorElementFactory.from_config(
json_config[self.CONFIG_DESCRIPTOR_FACTORY]
)
#: :type: smqtk.algorithms.DescriptorGenerator
self.descriptor_gen = smqtk.utils.plugin.from_plugin_config(
json_config[self.CONFIG_DESCRIPTOR_GENERATOR],
smqtk.algorithms.get_descriptor_generator_impls()
)
# Classifier config for uploaded IQR states.
self.iqr_state_classifier_config = \
json_config[self.CONFIG_IQR_CLASSIFIER]
self.add_routes()
def add_routes(self):
# REST API endpoint routes
#
# Example:
# self.add_url_rule('/endpoint',
# view_func=self.something,
# methods=['GET'])
#
self.add_url_rule('/is_ready',
view_func=self.is_ready,
methods=['GET'])
self.add_url_rule('/classifier_labels',
view_func=self.get_classifier_labels,
methods=['GET'])
self.add_url_rule('/classifier_metadata',
view_func=self.get_classifier_metadata,
methods=['GET'])
self.add_url_rule('/classify',
view_func=self.classify,
methods=['POST'])
self.add_url_rule('/classifier',
view_func=self.get_classifier,
methods=['GET'])
self.add_url_rule('/classifier',
view_func=self.add_classifier,
methods=['POST'])
self.add_url_rule('/iqr_classifier',
view_func=self.add_iqr_state_classifier,
methods=['POST'])
if self.enable_classifier_removal:
self.add_url_rule('/classifier',
view_func=self.del_classifier,
methods=['DELETE'])
# GET /is_ready
# noinspection PyMethodMayBeStatic
def is_ready(self):
"""
Simple endpoint that just means this server is up and responding.
"""
return make_response_json("Yes, I'm alive!")
# GET /classifier_labels
def get_classifier_labels(self):
"""
Get the descriptive labels of the classifiers currently set to
classify input data.
Returns 200: {
labels: list[str]
}
"""
all_labels = self.classifier_collection.labels()
return make_response_json("Classifier labels.",
labels=list(all_labels))
# GET /classifier_metadata
def get_classifier_metadata(self):
"""
Get metadata associated with a specific classifier instance referred to
by label.
URL Arguments:
label
Reference label for a specific classifier to query.
Returns code 200 on success and the JSON return object: {
...
// Sequence of class labels that this classifier can classify
// descriptors into. This includes the negative label.
class_labels=<list[str]>
}
"""
label = flask.request.values.get('label', default=None)
if label is None or not label:
return make_response_json("No label provided.", return_code=400)
elif label not in self.classifier_collection.labels():
return make_response_json("Label '%s' does not refer to a "
"classifier currently registered."
% label,
return_code=404,
label=label)
class_labels = \
self.classifier_collection.get_classifier(label).get_labels()
return make_response_json("Success", return_code=200,
class_labels=class_labels)
# POST /classify
def classify(self):
"""
Given a file's bytes (standard base64-format) and content mimetype,
describe and classify the content against all currently stored
classifiers (optionally a list of requested classifiers), returning a
map of classifier descriptive labels to their class-to-probability
results.
We expect the data to be transmitted in the body of the request in
standard base64 encoding form ("bytes_b64" key). We look for the
content type either as URL parameter or within the body
("content_type" key).
Below is an example call to this endpoint via the ``requests`` python
module, showing how base64 data is sent::
import base64
import requests
data_bytes = "Load some content bytes here."
requests.post('http://localhost:5000/classify',
data={'bytes_b64': base64.b64encode(data_bytes),
'content_type': 'text/plain'})
With curl on the command line::
$ curl -X POST localhost:5000/classify \
-d "content_type=text/plain" \
--data-urlencode "bytes_b64=$(base64 -w0 /path/to/file)"
# If this fails, you may wish to encode the file separately and
# use the file reference syntax instead:
$ base64 -w0 /path/to/file > /path/to/file.b64
$ curl -X POST localhost:5000/classify \
-d "content_type=text/plain" \
--data-urlencode bytes_b64@/path/to/file.b64
Optionally, the `label` parameter can be provided to limit the results
of classification to a set of classifiers::
$ curl -X POST localhost:5000/classify \
-d "content_type=text/plain" \
-d 'label=["some_label","other_label"]' \
--data-urlencode "bytes_b64=$(base64 -w0 /path/to/file)"
# If this fails, you may wish to encode the file separately and
# use the file reference syntax instead:
$ base64 -w0 /path/to/file > /path/to/file.b64
$ curl -X POST localhost:5000/classify \
-d "content_type=text/plain" \
-d 'label=["some_label","other_label"]' \
--data-urlencode bytes_b64@/path/to/file.b64
Data/Form arguments:
bytes_b64
Bytes in the standard base64 encoding to be described and
classified.
content_type
The mimetype of the sent data.
label
(Optional) JSON-encoded label or list of labels
adjustment
(Optional) JSON-encoded dictionary of labels to floats. Higher
values lower the gain on the class and therefore correspond to
higher precision (and lower recall) for the class (and higher
recall/lower precision for other classes). This translates to
calling ``smqtk.utils.prob_utils.adjust_proba``.
Possible error codes:
400
No bytes provided, or provided labels are malformed
404
Label or labels provided do not match any registered
classifier
Returns: {
...
result: {
classifier-label: {
class-label: prob,
...
},
...
}
}
"""
data_b64 = flask.request.values.get('bytes_b64', default=None)
content_type = flask.request.values.get('content_type', default=None)
label_str = flask.request.values.get('label', default=None)
adjustment_str = flask.request.values.get('adjustment', default=None)
labels = None
if label_str is not None:
try:
labels = flask.json.loads(label_str)
if isinstance(labels, six.string_types):
labels = [labels]
elif isinstance(labels, list):
for el in labels:
if not isinstance(el, six.string_types):
return make_response_json(
"Label must be a list of strings or a"
" single string.", 400)
else:
return make_response_json(
"Label must be a list of strings or a single"
" string.", 400)
except JSON_DECODE_EXCEPTION:
# Unquoted strings aren't valid JSON. That is, a plain string
# needs to be passed as '"label"' rather than just 'label' or
# "label". However, we can be a bit more generous and just
# allow such a string, but we have to place *some* restriction
# on it. We use `urllib.quote` for this since essentially it
# just checks to make sure that the string is made up of one
# of the following types of characters:
#
# - letters
# - numbers
# - spaces, underscores, periods, and dashes
#
# Since the concept of a "letter" is fraught with encoding and
# locality issues, we simply let urllib make this decision for
# us.
# If label_str matches the url-encoded version of itself, go
# ahead and use it
if urllib.parse.quote(label_str, safe='') == label_str:
labels = [label_str]
else:
return make_response_json(
"Label(s) are not properly formatted JSON.", 400)
# Collect optional result probability adjustment values
#: :type: dict[collections.Hashable, float]
adjustments = {}
if adjustment_str is not None:
try:
#: :type: dict[collections.Hashable, float]
adjustments = flask.json.loads(adjustment_str)
for label, val in six.iteritems(adjustments):
if not isinstance(label, six.string_types):
return make_response_json(
"Adjustment label '%s' is not a string type."
% label,
400)
if not isinstance(val, (int, float)):
return make_response_json(
"Adjustment value %s for label '%s' is not an int "
"or float" % (val, label),
400)
except JSON_DECODE_EXCEPTION:
return make_response_json(
"Adjustment(s) are not properly formatted JSON.", 400)
if data_b64 is None:
return make_response_json("No base-64 bytes provided.", 400)
elif content_type is None:
return make_response_json("No content type provided.", 400)
data_bytes = base64.b64decode(data_b64.encode('utf-8'))
self._log.debug("Length of byte data: %d" % len(data_bytes))
data_elem = DataMemoryElement(data_bytes, content_type, readonly=True)
descr_elem = self.descriptor_gen.compute_descriptor(
data_elem, self.descriptor_factory
)
self._log.debug("Descriptor shape: %s", descr_elem.vector().shape)
try:
clfr_map = self.classifier_collection.classify(
descr_elem, labels=labels,
factory=self.classification_factory)
except MissingLabelError as ex:
return make_response_json(
"The following labels are not registered with any"
" classifiers: '%s'"
% "', '".join(ex.labels),
404,
missing_labels=list(ex.labels))
# Transform classification result into JSON
c_json = {}
for classifier_label, c_elem in six.iteritems(clfr_map):
prediction = c_elem.get_classification()
if adjustments:
proba_labels = list(prediction.keys())
proba = [prediction[k] for k in proba_labels]
# Use opposite of adjustments, because we already set the
# convention of "higher: precision, lower: recall"
adj = [-adjustments.get(label, 0.0) for label in proba_labels]
adj_proba = prob_utils.adjust_proba(proba, adj)
prediction = dict(zip(proba_labels, adj_proba[0]))
c_json[classifier_label] = prediction
return make_response_json('Finished classification.',
result=c_json)
# GET /classifier
def get_classifier(self):
"""
Download the classifier corresponding to the provided label, pickled
and encoded in standard base64 encoding.
Below is an example call to this endpoint via the ``requests`` python
module::
import base64
import requests
from six.moves import cPickle as pickle
r = requests.get('http://localhost:5000/classifier',
data={'label': 'some_label'})
data_bytes = base64.b64decode(r.content)
classifier = pickle.loads(data_bytes)
With curl on the command line::
$ curl -X GET localhost:5000/classifier -d label=some_label | \
base64 -d > /path/to/file.pkl
Data args:
label
Label of the requested classifier
Possible error codes:
400
No label provided
404
Label does not refer to a registered classifier
Returns: The pickled and encoded classifier
"""
label = flask.request.values.get('label', default=None)
if label is None or not label:
return make_response_json("No label provided.", 400)
elif label not in self.classifier_collection.labels():
return make_response_json("Label '%s' does not refer to a "
"classifier currently registered."
% label,
404,
label=label)
clfr = self.classifier_collection.get_classifier(label)
try:
return base64.b64encode(pickle.dumps(clfr)), 200
except pickle.PicklingError:
return make_response_json("Classifier corresponding to label "
"'%s' cannot be pickled." % label,
500,
label=label)
# POST /iqr_classifier
def add_iqr_state_classifier(self):
"""
Train a classifier based on the user-provided IQR state file bytes in
a base64 encoding, matched with a descriptive label of that
classifier's topic.
Since all IQR session classifiers end up only having two result
classes (positive and negative), the topic of the classifier is
encoded in the descriptive label the user applies to the classifier.
Below is an example call to this endpoint via the ``requests`` python
module, showing how base64 data is sent::
import base64
import requests
data_bytes = "Load some content bytes here."
requests.post('http://localhost:5000/iqr_classifier',
data={'bytes_b64': base64.b64encode(data_bytes),
'label': 'some_label'})
With curl on the command line::
$ curl -X POST localhost:5000/iqr_classifier \
-d "label=some_label" \
--data-urlencode "bytes_b64=$(base64 -w0 /path/to/file)"
# If this fails, you may wish to encode the file separately and
# use the file reference syntax instead:
$ base64 -w0 /path/to/file > /path/to/file.b64
$ curl -X POST localhost:5000/iqr_classifier -d label=some_label \
--data-urlencode bytes_b64@/path/to/file.b64
To lock this classifier and guard it against deletion, add
"lock_label=true"::
$ curl -X POST localhost:5000/iqr_classifier \
-d "label=some_label" \
-d "lock_label=true" \
--data-urlencode "bytes_b64=$(base64 -w0 /path/to/file)"
Form arguments:
bytes_b64
base64 encoding of the bytes of the IQR session state save
file.
label
Descriptive label to apply to this classifier. This should not
conflict with existing classifier labels.
lock_label
If 'true', disallow deletion of this label. If 'false', allow
deletion of this label. Only has an effect if deletion is
enabled for this service. (Default: 'false')
Returns 201.
"""
data_b64 = flask.request.values.get('bytes_b64', default=None)
label = flask.request.values.get('label', default=None)
lock_clfr_str = flask.request.values.get('lock_label',
default='false')
if data_b64 is None or len(data_b64) == 0:
return make_response_json("No state base64 data provided.", 400)
elif label is None or len(label) == 0:
return make_response_json("No descriptive label provided.", 400)
try:
lock_clfr = bool(flask.json.loads(lock_clfr_str))
except JSON_DECODE_EXCEPTION:
return make_response_json("Invalid boolean value for"
" 'lock_label'. Was given: '%s'"
% lock_clfr_str,
400)
try:
# Using urlsafe version because it handles both regular and urlsafe
# alphabets.
data_bytes = base64.urlsafe_b64decode(data_b64.encode('utf-8'))
except (TypeError, binascii.Error) as ex:
return make_response_json("Invalid base64 input: %s" % str(ex)), \
400
# If the given label conflicts with one already in the collection,
# fail.
if label in self.classifier_collection.labels():
return make_response_json(
"Label already exists in classifier collection.", 400)
# Create dummy IqrSession to extract pos/neg descriptors.
iqrs = IqrSession()
iqrs.set_state_bytes(data_bytes, self.descriptor_factory)
pos = iqrs.positive_descriptors | iqrs.external_positive_descriptors
neg = iqrs.negative_descriptors | iqrs.external_negative_descriptors
del iqrs
# Make a classifier instance from the stored config for IQR
# session-based classifiers.
#: :type: SupervisedClassifier
classifier = smqtk.utils.plugin.from_plugin_config(
self.iqr_state_classifier_config,
get_classifier_impls(sub_interface=SupervisedClassifier)
)
classifier.train(class_examples={'positive': pos, 'negative': neg})
try:
self.classifier_collection.add_classifier(label, classifier)
# If we're allowing deletions, get the lock flag from the form and
# set it for this classifier
if self.enable_classifier_removal and lock_clfr:
self.immutable_labels.add(label)
except ValueError as e:
if e.args[0].find('JSON') > -1:
return make_response_json("Tried to parse malformed JSON in "
"form argument.", 400)
return make_response_json("Duplicate label ('%s') added during "
"classifier training of provided IQR "
"session state." % label, 400,
label=label)
return make_response_json("Finished training IQR-session-based "
"classifier for label '%s'." % label,
201,
label=label)
# POST /classifier
def add_classifier(self):
"""
Upload a **trained** classifier pickled and encoded in standard base64
encoding, matched with a descriptive label of that classifier's topic.
Since all classifiers have only two result classes (positive and
negative), the topic of the classifier is encoded in the descriptive
label the user applies to the classifier.
Below is an example call to this endpoint via the ``requests`` python
module, showing how base64 data is sent::
import base64
import requests
data_bytes = "Load some content bytes here."
requests.post('http://localhost:5000/classifier',
data={'bytes_b64': base64.b64encode(data_bytes),
'label': 'some_label'})
With curl on the command line::
$ curl -X POST localhost:5000/classifier -d label=some_label \
--data-urlencode "bytes_b64=$(base64 -w0 /path/to/file)"
# If this fails, you may wish to encode the file separately and
# use the file reference syntax instead:
$ base64 -w0 /path/to/file.pkl > /path/to/file.pkl.b64
$ curl -X POST localhost:5000/classifier -d label=some_label \
--data-urlencode bytes_b64@/path/to/file.pkl.b64
To lock this classifier and guard it against deletion, add
"lock_label=true"::
$ curl -X POST localhost:5000/classifier \
-d "label=some_label" \
-d "lock_label=true" \
--data-urlencode "bytes_b64=$(base64 -w0 /path/to/file.pkl)"
Data/Form arguments:
bytes_b64
Bytes, in the standard base64 encoding, of the pickled
classifier.
label
Descriptive label to apply to this classifier. This should not
conflict with existing classifier labels.
lock_label
If 'true', disallow deletion of this label. If 'false', allow
deletion of this label. Only has an effect if deletion is
enabled for this service. (Default: 'false')
Possible error codes:
400
May mean one of:
- No pickled classifier base64 data or label provided.
- Label provided is in conflict with an existing label in
the classifier collection.
Returns code 201 on success and the message: {
label: <str>
}
"""
clfr_b64 = flask.request.values.get('bytes_b64', default=None)
label = flask.request.values.get('label', default=None)
lock_clfr_str = flask.request.values.get('lock_label',
default='false')
if clfr_b64 is None or len(clfr_b64) == 0:
return make_response_json("No state base64 data provided.", 400)
elif label is None or len(label) == 0:
return make_response_json("No descriptive label provided.", 400)
try:
# This can throw a ValueError if lock_clfr is malformed JSON
lock_clfr = bool(flask.json.loads(lock_clfr_str))
except JSON_DECODE_EXCEPTION:
return make_response_json("Invalid boolean value for"
" 'lock_label'. Was given: '%s'"
% lock_clfr_str,
400)
# If the given label conflicts with one already in the collection,
# fail.
if label in self.classifier_collection.labels():
return make_response_json("Label '%s' already exists in"
" classifier collection." % label,
400,
label=label)
clfr = pickle.loads(base64.b64decode(clfr_b64.encode('utf-8')))
try:
self.classifier_collection.add_classifier(label, clfr)
# If we're allowing deletions, get the lock flag from the form
# and set it for this classifier
if self.enable_classifier_removal and lock_clfr:
self.immutable_labels.add(label)
except ValueError:
return make_response_json("Data added for label '%s' is not a"
" Classifier." % label,
400,
label=label)
return make_response_json("Uploaded classifier for label '%s'."
% label,
201,
label=label)
# DEL /classifier
def del_classifier(self):
"""
Remove a classifier by the given label.
Form args:
label
Label of the classifier to remove.
Possible error codes:
400
No classifier exists for the given label.
Returns 200.
"""
label = flask.request.values.get('label', default=None)
if label is None or not label:
return make_response_json("No label provided.", 400)
elif label not in self.classifier_collection.labels():
return make_response_json("Label '%s' does not refer to a"
" classifier currently registered."
% label,
404,
label=label)
elif label in self.immutable_labels:
return make_response_json("Label '%s' refers to a classifier"
" that is immutable." % label,
405,
label=label)
self.classifier_collection.remove_classifier(label)
return make_response_json("Removed classifier with label '%s'."
% label,
removed_label=label)
|
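# The snippet below is LeetCode-style: on the judge, TreeNode and Optional are
# provided by the environment. A minimal stand-in definition (an assumption, not
# part of the original submission) so the class runs standalone:
from typing import Optional

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right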
class Solution:
def maxDepth(self, root: Optional[TreeNode]) -> int:
return 0 if not root else max(self.maxDepth(root.left) + 1, self.maxDepth(root.right)+1)
|
#!/usr/bin/python3.4
import os
import sys
import traceback
def pyvercheck():
if sys.version_info < (3, 0):
print("Requires python 3.x, exiting.")
sys.exit(1)
def run():
pyvercheck()
print("Child PID: "+str(os.getpid()))
try:
import ircbot
except ImportError:
print(traceback.format_exc())
print("Cannot find ircbot module, exiting")
sys.exit(1)
bot = ircbot.IRCBot()
bot.connect()
if __name__ == "__main__":
print("Master PID:"+str(os.getpid()))
do_fork = True
while do_fork:
cpid = os.fork()
if cpid == 0:
run()
pid, status = os.waitpid(cpid, 0)
        # waitpid() packs the child's exit code into the high byte of status:
        # 1280 -> exit code 5, 256 -> exit code 1. Only restart for those.
        if status == 1280 or status == 256:
            print("Restarting child process..")
        print(status)
        do_fork = (status == 1280 or status == 256)
|
def subset_select(data):
    """Enumerate every non-empty subset of the 7 feature columns, keyed by subset size."""
set_size = {1: [data[:, :1], data[:, 1:2], data[:, 2:3], data[:, 3:4], data[:, 4:5], data[:, 5:6], data[:, 6:]]}
temp_list = []
for i in range(7):
for j in range(7):
if i < j:
temp_list.append(data[:, [i, j]])
set_size[2] = temp_list
temp_list = []
for i in range(7):
for j in range(7):
for k in range(7):
if i < j < k:
temp_list.append(data[:, [i, j, k]])
set_size[3] = temp_list
temp_list = []
for i in range(7):
for j in range(7):
for k in range(7):
for l in range(7):
if i < j < k < l:
temp_list.append(data[:, [i, j, k, l]])
set_size[4] = temp_list
temp_list = []
for i in range(7):
for j in range(7):
for k in range(7):
for l in range(7):
for m in range(7):
if i < j < k < l < m:
temp_list.append(data[:, [i, j, k, l, m]])
set_size[5] = temp_list
set_size[6] = [data[:, [0, 1, 2, 3, 4, 5]], data[:, [0, 6, 2, 3, 4, 5]], data[:, [0, 1, 6, 3, 4, 5]], data[:, [0, 1, 2, 6, 4, 5]], data[:, [0, 1, 2, 3, 6, 5]], data[:, [0, 1, 2, 3, 4, 6]], data[:, [6, 1, 2, 3, 4, 5]]]
set_size[7] = [data]
return set_size
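# A more compact equivalent sketch using itertools.combinations (assumes, as
# above, that `data` is a NumPy array with exactly 7 columns); subsets of each
# size may come out in a different order than in the hand-written loops:
from itertools import combinations

def subset_select_compact(data):
    return {k: [data[:, list(cols)] for cols in combinations(range(7), k)]
            for k in range(1, 8)}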
|
# This module contains the infrastructure for handling communication with the VERITAS database
import pymysql
import datetime
class DBConnection:
def __init__(self, *args, **kwargs):
#Extract the database information from the configuration dictionary
configdict=kwargs.get('configdict')
self.host=configdict.get('GLOBALCONFIG').get('DBHOSTNAME')
self.db=configdict.get('GLOBALCONFIG').get('DBNAME')
self.user=configdict.get('GLOBALCONFIG').get('DBUSERNAME')
#connect to server and create cursor
self.connect()
#Connect to the db and create cursor
def connect(self):
self.dbcnx = pymysql.connect(host=self.host, db=self.db, user=self.user, cursorclass=pymysql.cursors.DictCursor)
self.cursor = self.dbcnx.cursor()
#Close connection
def close(self):
self.dbcnx.close()
#return the calibration (laser/flasher) run assigned to a given data run
def get_calib_run(self, runnum):
query="SELECT run_id FROM tblRun_Info WHERE run_type IN ('flasher', 'laser') AND run_id IN (SELECT run_id FROM tblRun_Group WHERE group_id = (SELECT group_id FROM tblRun_GroupComment WHERE group_id IN (SELECT group_id FROM tblRun_Group WHERE run_id = %s) AND group_type IN ('laser', 'flasher') LIMIT 1))"
self.cursor.execute(query, runnum)
try:
calib_run=str(self.cursor.fetchone().get('run_id'))
except AttributeError:
calib_run = None
err_str = "Could not determine the calibration run for run {0}.".format(runnum)
print(err_str)
return calib_run
#return the datetime associated with a given run
#note that this will return an object from the standard datetime class
def get_datetime(self, runnum):
query="SELECT db_start_time FROM tblRun_Info WHERE run_id = %s"
self.cursor.execute(query, runnum)
try:
dt=self.cursor.fetchone().get('db_start_time')
except AttributeError:
dt = None
err_str = "No datetime found for run {0}. Are you sure you have a valid run number?".format(runnum)
print(err_str)
return dt
#returns the dyyyymmdd date string associated with a given run
def get_ddate(self, runnum):
        dt = self.get_datetime(runnum)
        # strftime produces the same zero-padded dyyyymmdd string as the manual formatting did
        return dt.strftime('d%Y%m%d')
#returns the source_id for a given run
def get_source_id(self, runnum):
query="SELECT source_id FROM tblRun_Info WHERE run_id = %s"
self.cursor.execute(query, runnum)
source_id=self.cursor.fetchone().get('source_id')
return source_id
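# Hypothetical usage sketch (the config dict mirrors the GLOBALCONFIG keys read
# in __init__; host/db/user values and the run number are placeholders):
#   cfg = {'GLOBALCONFIG': {'DBHOSTNAME': 'db.example.org', 'DBNAME': 'VERITAS', 'DBUSERNAME': 'readonly'}}
#   db = DBConnection(configdict=cfg)
#   print(db.get_ddate(90000), db.get_calib_run(90000))
#   db.close()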
|
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
import subprocess
from io import BytesIO
from textwrap import dedent
from zipfile import ZipFile
import pytest
from pants.backend.google_cloud_function.python.rules import PythonGoogleCloudFunctionFieldSet
from pants.backend.google_cloud_function.python.rules import (
rules as python_google_cloud_function_rules,
)
from pants.backend.google_cloud_function.python.target_types import PythonGoogleCloudFunction
from pants.backend.google_cloud_function.python.target_types import rules as target_rules
from pants.backend.python.goals import package_pex_binary
from pants.backend.python.goals.package_pex_binary import PexBinaryFieldSet
from pants.backend.python.target_types import (
PexBinary,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
)
from pants.backend.python.target_types_rules import rules as python_target_types_rules
from pants.core.goals import package
from pants.core.goals.package import BuiltPackage
from pants.core.target_types import (
FilesGeneratorTarget,
FileTarget,
RelocatedFiles,
ResourcesGeneratorTarget,
)
from pants.core.target_types import rules as core_target_types_rules
from pants.engine.addresses import Address
from pants.engine.fs import DigestContents
from pants.testutil.python_rule_runner import PythonRuleRunner
from pants.testutil.rule_runner import QueryRule
@pytest.fixture
def rule_runner() -> PythonRuleRunner:
rule_runner = PythonRuleRunner(
rules=[
*package_pex_binary.rules(),
*python_google_cloud_function_rules(),
*target_rules(),
*python_target_types_rules(),
*core_target_types_rules(),
*package.rules(),
QueryRule(BuiltPackage, (PythonGoogleCloudFunctionFieldSet,)),
],
target_types=[
FileTarget,
FilesGeneratorTarget,
PexBinary,
PythonGoogleCloudFunction,
PythonRequirementTarget,
PythonSourcesGeneratorTarget,
RelocatedFiles,
ResourcesGeneratorTarget,
],
)
rule_runner.set_options([], env_inherit={"PATH", "PYENV_ROOT", "HOME"})
return rule_runner
def create_python_google_cloud_function(
rule_runner: PythonRuleRunner,
addr: Address,
*,
expected_extra_log_lines: tuple[str, ...],
extra_args: list[str] | None = None,
) -> tuple[str, bytes]:
rule_runner.set_options(
[
"--source-root-patterns=src/python",
*(extra_args or ()),
],
env_inherit={"PATH", "PYENV_ROOT", "HOME"},
)
target = rule_runner.get_target(addr)
built_asset = rule_runner.request(
BuiltPackage, [PythonGoogleCloudFunctionFieldSet.create(target)]
)
assert expected_extra_log_lines == built_asset.artifacts[0].extra_log_lines
digest_contents = rule_runner.request(DigestContents, [built_asset.digest])
assert len(digest_contents) == 1
relpath = built_asset.artifacts[0].relpath
assert relpath is not None
return relpath, digest_contents[0].content
@pytest.fixture
def complete_platform(rule_runner: PythonRuleRunner) -> bytes:
rule_runner.write_files(
{
"pex_exe/BUILD": dedent(
"""\
python_requirement(name="req", requirements=["pex==2.1.99"])
pex_binary(dependencies=[":req"], script="pex")
"""
),
}
)
result = rule_runner.request(
BuiltPackage, [PexBinaryFieldSet.create(rule_runner.get_target(Address("pex_exe")))]
)
rule_runner.write_digest(result.digest)
pex_executable = os.path.join(rule_runner.build_root, "pex_exe/pex_exe.pex")
return subprocess.run(
args=[pex_executable, "interpreter", "inspect", "-mt"],
env=dict(PEX_MODULE="pex.cli", **os.environ),
check=True,
stdout=subprocess.PIPE,
).stdout
def test_warn_files_targets(rule_runner: PythonRuleRunner, caplog) -> None:
rule_runner.write_files(
{
"assets/f.txt": "",
"assets/BUILD": dedent(
"""\
files(name='files', sources=['f.txt'])
relocated_files(
name='relocated',
files_targets=[':files'],
src='assets',
dest='new_assets',
)
# Resources are fine.
resources(name='resources', sources=['f.txt'])
"""
),
"src/py/project/__init__.py": "",
"src/py/project/app.py": dedent(
"""\
def handler(event, context):
print('Hello, World!')
"""
),
"src/py/project/BUILD": dedent(
"""\
python_sources(
name='lib',
dependencies=['assets:files', 'assets:relocated', 'assets:resources'],
)
python_google_cloud_function(
name='lambda',
dependencies=[':lib'],
handler='foo.bar.hello_world:handler',
runtime='python37',
type='event',
)
"""
),
}
)
assert not caplog.records
zip_file_relpath, _ = create_python_google_cloud_function(
rule_runner,
Address("src/py/project", target_name="lambda"),
expected_extra_log_lines=(
" Runtime: python37",
" Handler: handler",
),
)
assert caplog.records
assert "src.py.project/lambda.zip" == zip_file_relpath
assert (
"The target src/py/project:lambda (`python_google_cloud_function`) transitively depends on"
in caplog.text
)
assert "assets/f.txt:files" in caplog.text
assert "assets:relocated" in caplog.text
assert "assets:resources" not in caplog.text
@pytest.mark.parametrize(
("ics", "runtime"),
[
pytest.param(["==3.7.*"], None, id="runtime inferred from ICs"),
pytest.param(None, "python37", id="runtime explicitly set"),
],
)
def test_create_hello_world_gcf(
ics: list[str] | None, runtime: None | str, rule_runner: PythonRuleRunner
) -> None:
rule_runner.write_files(
{
"src/python/foo/bar/hello_world.py": dedent(
"""
import mureq
def handler(event, context):
print('Hello, World!')
"""
),
"src/python/foo/bar/BUILD": dedent(
f"""
python_requirement(name="mureq", requirements=["mureq==0.2"])
python_sources(interpreter_constraints={ics!r})
python_google_cloud_function(
name='gcf',
handler='foo.bar.hello_world:handler',
runtime={runtime!r},
type='event',
)
"""
),
}
)
zip_file_relpath, content = create_python_google_cloud_function(
rule_runner,
Address("src/python/foo/bar", target_name="gcf"),
expected_extra_log_lines=(
" Runtime: python37",
" Handler: handler",
),
)
assert "src.python.foo.bar/gcf.zip" == zip_file_relpath
zipfile = ZipFile(BytesIO(content))
names = set(zipfile.namelist())
assert "mureq/__init__.py" in names
assert "foo/bar/hello_world.py" in names
assert zipfile.read("main.py") == b"from foo.bar.hello_world import handler as handler"
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
class data():
'''
class used to generate cleaned data
'''
def __init__(self, initial_data):
self.initial_data = initial_data
# function used to clean data
def clean_data(self):
df = self.initial_data[['CAMIS', 'BORO', 'GRADE', 'GRADE DATE']] # keep useful columns
df = df.dropna(subset = ['GRADE', 'CAMIS', 'BORO', 'GRADE DATE'])
df = df[df['GRADE'].isin(['A', 'B', 'C'])] # keep only grades A, B, C
df['GRADE DATE'] = pd.to_datetime(df['GRADE DATE'])
return df
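# Hypothetical usage sketch (assumes a DOHMH-style inspections CSV containing
# the columns referenced above):
#   raw = pd.read_csv('restaurant_inspections.csv')
#   cleaned = data(raw).clean_data()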
|
import copy
from board import Board
from dice import Dice
class Game:
colors = ['blue', 'red', 'green', 'yellow']
max_players = 4
max_rounds = 15
def __init__(self):
self.players = []
self.board = Board()
def add_player(self, player):
if len(self.players) < self.max_players:
self.players.append(player)
player.color = self.colors[len(self.players) - 1]
def start(self):
self.board = Board()
self.winner = self._loop()
print self.winner.name, "wins the game in", self.round, "rounds"
def _loop(self):
for round in range(self.max_rounds):
self.round = round + 1
for player in self.players:
self._turn(player)
if self.board.four_in_a_row():
return player
# if there is no winner after max_rounds, the game ends. The winner is determined by
# counting the houses...
return self._determine_winner()
def _determine_winner(self):
points = {}
for player in self.players:
points[player] = 0
for field in self.board.fields():
for i, player in enumerate(field.players):
points[player] += i + 1
# return the player with the most points
max_points = max(points.values())
winners = [player for player, points in points.iteritems() if points == max_points]
if len(winners) == 1:
return winners[0]
return None
def _turn(self, player):
# pass a copy of the original board, so the players cannot cheat
board = copy.deepcopy(self.board)
dice = Dice()
keep = []
for throw in range(1, 5):
dice.roll(keep)
keep = player.play(dice, board)
# verify the answer of the player
keep = dice.verify(keep)
if len(keep) == 5:
break
# last roll, nothing for the player to decide
dice.roll(keep)
# ask player where to place his/her piece
place = player.place(dice, board)
self.board.place(place, player, dice)
|
def kruskal(G):
E = [(G[u][v], u, v) for u in G for v in G[u]]
T = set()
vertex = set()
E = sorted(E)
for _, u, v in E:
if len(T) == len(G) - 1:
return T
else:
            if u in vertex and v in vertex:
continue
else:
T.add((u, v))
if u not in vertex:
vertex.add(u)
if v not in vertex:
vertex.add(v)
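# The vertex-set check above is only a rough guard; a standard Kruskal uses
# union-find to decide whether an edge joins two different components. A
# minimal sketch, using the same adjacency-dict graph format as main() below:
def kruskal_union_find(G):
    parent = {u: u for u in G}

    def find(u):
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path halving
            u = parent[u]
        return u

    T = set()
    for _, u, v in sorted((G[u][v], u, v) for u in G for v in G[u]):
        ru, rv = find(u), find(v)
        if ru != rv:  # the edge connects two components, so keep it
            parent[ru] = rv
            T.add((u, v))
    return T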
def main():
a, b, c, d, e, f = range(6)
g = {a: {b: 1, c: 2, d: 3, e: 6}, b: {a: 1, c: 2, e: 4}, c: {a: 2, b: 2, d: 2}, d: {a: 3, c: 2, f: 3},
e: {a: 6, b: 4, f: 2}, f: {d: 3, e: 2}}
seq = kruskal(g)
print(seq)
if __name__ == '__main__':
main()
|
print("my first pgm")
|
from django.db import models
def format_filename(format_string):
"""
Takes a Django format string like
"/a/path/{instance.lol}/"
and returns a function for the FileField
"""
def upload_to(instance, filename):
return format_string.format(obj=instance, filename=filename)
return upload_to
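# Hypothetical usage (the field name and format string are illustrative only):
#   photo_file = models.FileField(upload_to=format_filename('patients/{obj.pk}/{filename}'))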
class Patient(models.Model):
user = models.ForeignKey('auth.User')
name = models.CharField(null=True, max_length=200)
gender = models.CharField(max_length=200)
age = models.IntegerField(null=True)
photo = models.ForeignKey('Photo', null=True, blank=True)
locations = models.ManyToManyField('Place', null=True)
symptoms = models.ManyToManyField('Symptom', null=True)
interests = models.ManyToManyField('Interest', null=True)
personal_words = models.TextField(blank=True)
favourite_words = models.TextField(blank=True)
healthcare_location = models.ManyToManyField('HealthcareLocation', null=True)
primary_condition = models.ForeignKey('Condition', related_name="primary_condition", null=True)
other_conditions = models.ManyToManyField('Condition', related_name="other_condition", null=True)
life_plan = models.TextField(blank=True)
skills = models.TextField(blank=True)
favourite_things = models.TextField(blank=True, help_text='IGNORE THIS FIELD PLZ')
other_things = models.TextField(blank=True)
interets = models.TextField(blank=True)
religion = models.TextField(blank=True)
preocupations = models.TextField(blank=True)
what_im_looking_for = models.TextField(blank=True)
def __unicode__(self):
return u'%s (%s)' % (self.name, self.user.username)
class Message(models.Model):
sender = models.ForeignKey('auth.User', related_name="sent_messages")
receiver = models.ForeignKey('auth.User', related_name="received_messages")
created_at = models.DateTimeField(auto_now_add=True)
read_flag = models.BooleanField(default=False)
body = models.TextField()
@property
def sender_photo(self):
        if self.sender.patient_set.exists():
            return self.sender.patient_set.all()[0].photo
else:
return None
def __unicode__(self):
return u"%s -> %s { %s }" % (self.sender, self.receiver, self.body)
class Place(models.Model):
name = models.CharField(max_length=200)
location = models.CharField(max_length=200)
def __unicode__(self):
return self.name
class HealthcareLocation(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
class Interest(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
class Symptom(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
class Condition(models.Model):
name = models.CharField(max_length=200)
def __unicode__(self):
return self.name
class Photo(models.Model):
created_by = models.ForeignKey('auth.User')
image_url = models.URLField(null=True)
|
'''
Created on Apr 21, 2017
@author: andrewbloomberg
'''
from bs4 import BeautifulSoup
import urllib2
import re
from urllib2 import URLError
import json
if __name__ == '__main__':
file = open('buildings_and_addresses.txt', 'r')
lines = file.readlines()
buildings = []
for line in lines:
split = line.split(":")
buildings.append((split[0], split[1]))
walking_travels = []
for building1 in buildings:
for building2 in buildings:
if(building1 == building2):
pass
else:
place1 = building1[1].replace(" ", "_").rstrip()
place2 = building2[1].replace(" ", "_").rstrip()
travel = "https://maps.googleapis.com/maps/api/directions/json?mode=bicycling&origin=" + place1 + "&destination=" + place2
header = {'User-Agent': 'Mozilla/5.0'}
req = urllib2.Request(travel, headers=header)
page = urllib2.urlopen(req)
data = json.loads(page.read())
try:
time = data["routes"][0]["legs"][0]["duration"]["text"]
walking_travels.append((building1[0], building2[0], time))
print(time)
except IndexError:
print("failure")
pass
with open('data_bicycling.json', 'w') as outfile:
json.dump(walking_travels, outfile, indent = 4, sort_keys= True)
|
#!/usr/bin/python
import os
import sys
import StringIO
import unittest
from mic import msger
def suite():
return unittest.makeSuite(MsgerTest)
class MsgerTest(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = StringIO.StringIO()
sys.stderr = StringIO.StringIO()
msger.set_loglevel('normal')
self.loglevel = msger.LOG_LEVEL
def tearDown(self):
msger.LOG_LEVEL = self.loglevel
sys.stdout = self.stdout
sys.stderr = self.stderr
def testRaw(self):
excepted = "hello\n"
msger.raw("hello")
self.assertEqual(excepted, sys.stdout.getvalue())
def testInfo(self):
excepted = "Info: hello\n"
msger.info("hello")
self.assertEqual(excepted, sys.stdout.getvalue())
def testWarning(self):
excepted = "Warning: hello\n"
msger.warning("hello")
self.assertEqual(excepted, sys.stderr.getvalue())
def testVerbose(self):
excepted = "Verbose: hello\n"
msger.verbose("hello")
self.assertEqual("", sys.stdout.getvalue())
msger.set_loglevel("verbose")
msger.verbose("hello")
self.assertEqual(excepted, sys.stdout.getvalue())
def testDebug(self):
excepted = "Debug: hello\n"
msger.debug("hello")
self.assertEqual("", sys.stdout.getvalue())
msger.set_loglevel("debug")
msger.debug("hello")
self.assertEqual(excepted, sys.stderr.getvalue())
def testLogstderr(self):
excepted = "hello\n"
cwd = os.getcwd()
msger.enable_logstderr(cwd + "/__tmp_err.log")
print >>sys.stderr, "hello"
msger.disable_logstderr()
self.assertEqual(excepted, sys.stderr.getvalue())
def testLoglevel(self):
# test default value
self.assertEqual("normal", msger.get_loglevel())
# test no effect value
msger.set_loglevel("zzzzzz")
self.assertEqual("normal", msger.get_loglevel())
# test effect value
msger.set_loglevel("verbose")
self.assertEqual("verbose", msger.get_loglevel())
msger.set_loglevel("debug")
self.assertEqual("debug", msger.get_loglevel())
msger.set_loglevel("quiet")
self.assertEqual("quiet", msger.get_loglevel())
if __name__ == "__main__":
unittest.main()
|
import argparse
import configparser
DEFAULT_CONFIG = 'config.ini'
import datetime
from pandas_datareader import get_data_tiingo
import pickle
def settings(args):
# Settings configuration, defaults can be changed in the config file
config = configparser.ConfigParser()
if args.config_file is None:
config.read(DEFAULT_CONFIG)
else:
config.read(args.config_file)
if args.kapi is None:
api_key = config['DEFAULT']['tiingoAPI']
else:
api_key = args.kapi
if args.pickle_file is None:
pickle_path = config['DEFAULT']['picklePath']
else:
pickle_path = args.pickle_file
return api_key, pickle_path
def main():
# Argument parser for simple settings changes
parser = argparse.ArgumentParser()
parser.add_argument('query', help='Search query', nargs='*')
parser.add_argument('-s', '--start_date', nargs=3, type=int,
help='Start date for gathering stock data')
parser.add_argument('-e', '--end_date', nargs=3, type=int,
help='End date for gathering stock data')
parser.add_argument('-c', '--config_file',
help='Path to non-default config file')
parser.add_argument('-kapi', '--kapi',
                        help='Use a different API key than the default in the config')
parser.add_argument('-p', '--pickle_file',
help='use a different pickle file')
args = parser.parse_args()
api_key, pickle_path = settings(args)
if args.start_date is None:
start_date = None
else:
start_date = datetime.datetime(*map(int, args.start_date))
if args.end_date is None:
end_date = None
else:
end_date = datetime.datetime(*map(int, args.end_date))
reader = get_data_tiingo(args.query, api_key=api_key, start=start_date, end=end_date)
with open(pickle_path, 'wb') as file:
pickle.dump(reader, file)
if __name__ == '__main__':
main()
|
""" Extract multiple 2D surfaces from a cube by controllably removing non-crucial amplitude information. """
#pylint: disable=import-error, no-name-in-module, wrong-import-position
from copy import copy
import numpy as np
from scipy.ndimage import sobel
from .base import BaseController
from ..labels import Horizon
from ..metrics import HorizonMetrics
from ...batchflow import HistoSampler, NumpySampler
from ...batchflow import Pipeline, B, V, C, D, P, R
from ...batchflow.models.torch import EncoderDecoder
class Extractor(BaseController):
""" Extract multiple 2D surfaces from a cube by controllably removing non-crucial amplitude information.
    More specifically, train an autoencoder that compresses seismic data and then restores it:
    due to the inherent loss of information during this procedure, only the most relevant amplitudes are correctly restored.
    In the case of seismic data, those correspond to the most distinct surfaces, which are, by definition, horizons.
"""
#pylint: disable=unused-argument, logging-fstring-interpolation, no-member, protected-access
#pylint: disable=access-member-before-definition, attribute-defined-outside-init
def make_sampler(self, dataset, bins=50, side_view=False, **kwargs):
""" Create uniform sampler over present traces. Works inplace.
Plots
-----
Maps with examples of sampled slices of `crop_shape` size, both normalized and not.
"""
_ = kwargs
geometry = dataset.geometries[0]
idx = np.nonzero(geometry.zero_traces != 1)
points = np.hstack([idx[0].reshape(-1, 1),
idx[1].reshape(-1, 1)])
geometry_sampler = HistoSampler(np.histogramdd(points/geometry.cube_shape[:2], bins=bins))
geometry_sampler = geometry_sampler & NumpySampler('u', low=0., high=0.9)
dataset.create_sampler(mode=geometry_sampler)
dataset.modify_sampler('train_sampler', finish=True)
dataset.show_slices(
src_sampler='train_sampler', normalize=False, shape=self.crop_shape,
adaptive_slices=False, side_view=side_view,
cmap='Reds', interpolation='bilinear', show=self.show_plots, figsize=(15, 15),
savepath=self.make_save_path(f'slices_{geometry.short_name}.png')
)
dataset.show_slices(
src_sampler='train_sampler', normalize=True, shape=self.crop_shape,
adaptive_slices=False, side_view=side_view,
cmap='Reds', interpolation='bilinear', show=self.show_plots, figsize=(15, 15),
savepath=self.make_save_path(f'slices_n_{geometry.short_name}.png')
)
def inference_1(self, dataset, heights_range=None, orientation='i', overlap_factor=2,
filter=True, thresholds=None, coverage_threshold=0.5, std_threshold=5.,
metric_threshold=0.5, chunk_size=100, chunk_overlap=0.2, minsize=10000,
filtering_matrix=None, filter_threshold=0, **kwargs):
""" Split area for inference into `big` chunks, inference on each of them, merge results. """
#pylint: disable=redefined-builtin, too-many-branches
_ = kwargs
thresholds = thresholds or np.arange(0.2, 1.0, 0.1)
geometry = dataset.geometries[0]
spatial_ranges, heights_range = self.make_inference_ranges(dataset, heights_range)
config, crop_shape_grid = self.make_inference_config(orientation)
# Actual inference
axis = np.argmin(crop_shape_grid[:2])
iterator = range(spatial_ranges[axis][0], spatial_ranges[axis][1], int(chunk_size*(1 - chunk_overlap)))
        storage = [[] for _ in thresholds]  # independent per-threshold lists ([[]] * n would alias a single list)
for chunk in self.make_pbar(iterator, desc=f'Inference on {geometry.name}| {orientation}'):
current_spatial_ranges = copy(spatial_ranges)
current_spatial_ranges[axis] = [chunk, min(chunk + chunk_size, spatial_ranges[axis][-1])]
dataset.make_grid(dataset.indices[0], crop_shape_grid,
*current_spatial_ranges, heights_range,
batch_size=self.batch_size,
overlap_factor=overlap_factor,
filtering_matrix=filtering_matrix,
filter_threshold=filter_threshold)
inference_pipeline = (self.get_inference_template() << config) << dataset
inference_pipeline.run(D('size'), n_iters=dataset.grid_iters, bar=self.bar,
bar_desc=f'Inference on {geometry.name} | {orientation}')
assembled_pred = dataset.assemble_crops(inference_pipeline.v('predicted_masks'),
order=config.get('order'))
# Specific to Extractor:
for sign in [-1, +1]:
mask = sign * assembled_pred
for i, threshold in enumerate(thresholds):
chunk_horizons = Horizon.from_mask(mask, dataset.grid_info,
threshold=threshold, minsize=minsize)
storage[i].extend(chunk_horizons)
merged_horizons = []
for horizon_list in storage:
merged = Horizon.merge_list(horizon_list, mean_threshold=2.5,
adjacency=3, minsize=minsize)
merged_horizons.extend(merged)
merged_horizons = Horizon.merge_list(merged_horizons, mean_threshold=0.5)
del storage
filtered_horizons = []
for horizon in merged_horizons:
# CHECK 1: coverage
if horizon.coverage >= coverage_threshold:
# CHECK 2: std
matrix = sobel(np.copy(horizon.matrix))
matrix[horizon.matrix == Horizon.FILL_VALUE] = 0
matrix[abs(matrix) > 100] = 0
std_coeff = np.std(matrix)
if std_coeff <= std_threshold:
# CHECK 3: metric
hm = HorizonMetrics(horizon)
corrs = hm.evaluate('support_corrs', supports=50, agg='nanmean')
if filter:
horizon.filter(filtering_matrix=(corrs <= metric_threshold).astype(np.int32))
if horizon.coverage <= coverage_threshold:
continue
corr_coeff = np.nanmean(corrs)
if corr_coeff >= metric_threshold:
horizon._corr_coeff = corr_coeff
filtered_horizons.append(horizon)
self.log(f'depth: {horizon.h_mean:6.6}; cov: {horizon.coverage:6.6};'
f' std: {std_coeff:6.6}; metric: {corr_coeff:6.6}')
del merged_horizons
horizons = []
for horizon in filtered_horizons:
for i, already_stored in enumerate(horizons):
if abs(horizon.h_mean - already_stored.h_mean) < 2.:
if horizon._corr_coeff > already_stored._corr_coeff:
_ = horizons.pop(i)
horizons.append(horizon)
break
break
else:
horizons.append(horizon)
return horizons
# Pipelines
def load_pipeline(self):
""" Define data loading pipeline.
Following parameters are fetched from pipeline config: `adaptive_slices`, 'grid_src' and `rebatch_threshold`.
"""
return (
Pipeline()
.make_locations(points=D('train_sampler')(self.batch_size),
shape=self.crop_shape, adaptive_slices=C('adaptive_slices'),
side_view=C('side_view', default=False))
.load_cubes(dst='images')
.adaptive_reshape(src='images', shape=self.crop_shape)
.normalize(src='images')
)
def augmentation_pipeline(self):
""" Define augmentation pipeline. """
return (
Pipeline()
.transpose(src='images', order=(1, 2, 0))
.additive_noise(scale=0.005, src='images', dst='images', p=0.3)
.flip(axis=1, src='images', seed=P(R('uniform', 0, 1)), p=0.3)
.rotate(angle=P(R('uniform', -15, 15)),
src='images', p=0.3)
.scale_2d(scale=P(R('uniform', 0.85, 1.15)),
src='images', p=0.3)
.perspective_transform(alpha_persp=P(R('uniform', 25, 50)),
src='images', p=0.3)
.elastic_transform(alpha=P(R('uniform', 35, 45)),
sigma=P(R('uniform', 4, 4.5)),
src='images', p=0.2)
.transpose(src='images', order=(2, 0, 1))
)
def train_pipeline(self):
""" Define model initialization and model training pipeline.
Following parameters are fetched from pipeline config: `model_config`.
"""
return (
Pipeline()
.init_variable('loss_history', [])
.init_model('dynamic', EncoderDecoder, 'model', C('model_config'))
.train_model('model',
fetches='loss',
images=B('images'),
masks=B('images'),
save_to=V('loss_history', mode='a'))
)
def get_train_template(self, **kwargs):
""" Define the whole training procedure pipeline including data loading, augmentation and model training. """
_ = kwargs
return (
self.load_pipeline() +
self.augmentation_pipeline() +
self.train_pipeline()
)
|
"""
Definition of views.
"""
from datetime import datetime
from django.shortcuts import render
from django.http import HttpRequest, JsonResponse, HttpResponse
import requests
from . models import Place
def search_view(request):
# response = requests.get('https://places.ls.hereapi.com/places/v1/autosuggest?at=40.74917,-73.98529&q=chrysler&apiKey=SagftoH9j5tk5u241lXnUAxPHDR_QtRIU1l0LYJO3No')
try:
location = request.GET['name']
if not location:
raise ValueError("EMPTY QUERY")
except Exception as e:
return HttpResponse('BAD_REQUEST: Query parameter name is required.',status=400)
response = requests.get("https://autocomplete.search.hereapi.com/v1/autocomplete?q=" + location + "&apiKey=SagftoH9j5tk5u241lXnUAxPHDR_QtRIU1l0LYJO3No")
places = response.json()['items']
search = []
for item in places:
search.append({
"title":item['title'],
"id": item['id'],
"country": item['address']['countryName'],
"city": item['address']['city'],
})
return JsonResponse({ "places": search })
def location_view(request):
location_place = request.GET['name']
response = requests.get("https://geocode.search.hereapi.com/v1/geocode?q=" + location_place +"&apiKey=SagftoH9j5tk5u241lXnUAxPHDR_QtRIU1l0LYJO3No")
search_result = response.json()['items']
result = []
for item in search_result:
result.append({
"title": item['title'],
"resultType":item['resultType'],
"localityType": item['localityType'],
"position":item['position'],
"mapView": item['mapView']
})
place = Place(name=item['title'], position=str(item['position']['lat'])+","+str(item['position']['lng']))
place.save()
return JsonResponse({"search": result })
def home(request):
"""Renders the home page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/index.html',
{
'title':'Home Page',
'year':datetime.now().year,
}
)
def contact(request):
"""Renders the contact page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/contact.html',
{
'title':'Contact',
'message':'Your contact page.',
'year':datetime.now().year,
}
)
def about(request):
"""Renders the about page."""
assert isinstance(request, HttpRequest)
return render(
request,
'app/about.html',
{
'title':'About',
'message':'Your application description page.',
'year':datetime.now().year,
}
)
|
from django.shortcuts import render
from django.views.generic import ListView
from .models import Event
class MainPage(ListView):
model = Event
template_name = 'mainapp/index.html'
context_object_name = 'events'
|
#!/usr/bin/env python
# coding=utf-8
from torch.utils.data import Dataset
from torchvision import transforms
import skimage.io as io
import torch
import matplotlib.pyplot as plt
class PETADataset(Dataset):
def __init__(self,img_list_path,transform=None):
self.img_list_path = img_list_path
self.transform = transform
self.img_label_dic = {"img_path":[],"label":[]}
fr = open(self.img_list_path,"r")
for line in fr:
strings = line.split(" ")
strings[-1] = strings[-1].split("\n")[0]
# print strings[1]
self.img_label_dic["img_path"].append(strings[0])
self.img_label_dic["label"].append(strings[1:])
def __len__(self):
if(len(self.img_label_dic["img_path"]) != len(self.img_label_dic["label"])):
return 0
return len(self.img_label_dic["img_path"])
def __getitem__(self,idx):
path = "/home/dataset/human_attribute/peta/images/" + self.img_label_dic["img_path"][idx]
img = io.imread(path)
        label = [int(i) for i in self.img_label_dic["label"][idx]]  # remember to cast labels to int here, otherwise they cannot be converted to a tensor later
if self.transform:
img = self.transform(img)
# print len(imgs)
#labels = np.zeros((imgs.shape[0],1))
#labels[:] = label
        label = torch.FloatTensor([label])  # must be a LongTensor, not a FloatTensor
#img = torch.Tensor(img)
#img = img.view(1,img.size()[0],img.size()[1]) #添加上1维度
#print img.shape,label.shape
return img,label.squeeze()
if __name__ =="__main__":
    train_dataset = PETADataset("/home/dataset/human_attribute/PA100K/train.txt")
for i in range(len(train_dataset)):
image,label = train_dataset[i]
print label.shape
#print image.shape
plt.figure("image")
plt.imshow(image)
#print image[0]
plt.show()
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Artist',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, null=True, blank=True)),
('slug', models.SlugField(max_length=100, unique=True, null=True, blank=True)),
],
options={
'db_table': 'artist',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, null=True, blank=True)),
('slug', models.SlugField(max_length=100, unique=True, null=True, blank=True)),
],
options={
'db_table': 'category',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Designer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, null=True, blank=True)),
('slug', models.SlugField(max_length=100, unique=True, null=True, blank=True)),
],
options={
'db_table': 'designer',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Family',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, null=True, blank=True)),
('slug', models.SlugField(max_length=100, unique=True, null=True, blank=True)),
],
options={
'db_table': 'family',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Game',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=250)),
('slug', models.SlugField(max_length=250)),
('age', models.IntegerField(blank=True)),
('year_published', models.IntegerField(blank=True)),
('playing_time', models.IntegerField(blank=True)),
('language_dependence', models.CharField(max_length=250, blank=True)),
('max_players', models.IntegerField(blank=True)),
('min_players', models.IntegerField(blank=True)),
('rank', models.IntegerField(blank=True)),
('image', models.URLField(blank=True)),
('thumbnail', models.URLField(blank=True)),
('bgg_url', models.URLField(blank=True)),
('average_rating', models.FloatField(blank=True)),
('num_ratings', models.IntegerField(blank=True)),
('fans', models.IntegerField(blank=True)),
('total_plays', models.IntegerField(blank=True)),
('plays_this_month', models.IntegerField(blank=True)),
('users_owning', models.IntegerField(blank=True)),
('users_trading', models.IntegerField(blank=True)),
('users_wanting', models.IntegerField(blank=True)),
('bgg', models.IntegerField(unique=True)),
('artist', models.ManyToManyField(to='games.Artist')),
('category', models.ManyToManyField(to='games.Category')),
('designer', models.ManyToManyField(to='games.Designer')),
('family', models.ManyToManyField(to='games.Family')),
],
options={
'ordering': ('rank',),
'db_table': 'game',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Mechanic',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, null=True, blank=True)),
('slug', models.SlugField(max_length=100, unique=True, null=True, blank=True)),
],
options={
'db_table': 'mechanic',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Publisher',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, null=True, blank=True)),
('slug', models.SlugField(max_length=100, unique=True, null=True, blank=True)),
],
options={
'db_table': 'publisher',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Subdomain',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, null=True, blank=True)),
('slug', models.SlugField(max_length=100, unique=True, null=True, blank=True)),
],
options={
'db_table': 'subdomain',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='game',
name='mechanic',
field=models.ManyToManyField(to='games.Mechanic'),
preserve_default=True,
),
migrations.AddField(
model_name='game',
name='publisher',
field=models.ManyToManyField(to='games.Publisher'),
preserve_default=True,
),
migrations.AddField(
model_name='game',
name='subdomain',
field=models.ManyToManyField(to='games.Subdomain'),
preserve_default=True,
),
]
|
def crawler(nombreFichero):
print("El fichero ha sido introducido correctamente")
|
from functools import reduce
from collections import Counter
import itertools
puzzle = [line.split() for line in open('input.txt')]
# with open('input.txt', 'r') as myfile:
# puzzle = myfile.readlines()
# total = 0
# print(reduce(lambda sum, x: sum + x[0] if else sum, puzzle, 0))
# for line in [s for s in myfile.readlines()]:
# good = True
# nums = line.split()
# for i in Counter(nums).values():
# if i > 1:
# good = False
# if good:
# total += 1
# print(total)
print(sum(1 for words in puzzle if all(v == 1 for v in Counter(words).values())))
if conditional == "==" {
if operation == "inc" {
//...
} else if operation == "dec" {
//....
}
} else if conditional == "!=" {
if operation == "inc" {
//...
} else if operation == "dec" {
//....
}
} else if conditional == ">=" {
if operation == "inc" {
//...
} else if operation == "dec" {
//....
}
} else if conditional == "<=" {
if operation == "inc" {
//...
} else if operation == "dec" {
//....
}
} else if conditional == ">" {
if operation == "inc" {
//...
} else if operation == "dec" {
//....
}
} else if conditional == "<" {
if operation == "inc" {
//...
} else if operation == "dec" {
//....
}
}
|
from .print import print
def render(t, padding=''):
"""Render an ASCII table"""
# Convert all to strings
t = [[str(cell) for cell in row] for row in t]
# Ensure table has same dimensions
for row in t:
assert len(row) == len(t[0])
# Get column widths
widths = [0 for _ in t[0]]
for row in t:
widths = [max(widths[i], len(cell)) for i, cell in enumerate(row)]
tout = ''
# Make horizontal divider
hdiv = "{padding}+{content}+\n".format(
padding=padding,
content='+'.join(['-' * width for width in widths]))
tout += hdiv
# Make cells
for row in t:
tout += "{padding}|{content}|\n".format(
padding=padding,
content='|'.join([
cell + ' ' * (width - len(cell))
for width, cell in zip(widths, row)]))
tout += hdiv
return tout
def table(t, padding=''):
"""Print an ASCII table"""
print(render(t, padding=padding))
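# A quick illustration of render() on hypothetical rows; column widths come
# from the longest cell in each column:
#   render([['name', 'qty'], ['apples', 3], ['pears', 12]])
#   +------+---+
#   |name  |qty|
#   |apples|3  |
#   |pears |12 |
#   +------+---+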
|
import random
import time
import os
import sys
from Quartz.CoreGraphics import CGEventCreateMouseEvent
from Quartz.CoreGraphics import CGEventPost
from Quartz.CoreGraphics import kCGEventMouseMoved
from Quartz.CoreGraphics import kCGEventLeftMouseDown
from Quartz.CoreGraphics import kCGEventLeftMouseUp
from Quartz.CoreGraphics import kCGMouseButtonLeft
from Quartz.CoreGraphics import kCGHIDEventTap
from Quartz.CoreGraphics import CGEventCreateKeyboardEvent
DOWN_ARROW = 125
UP_ARROW = 126
LEFT_ARROW = 123
RIGHT_ARROW = 124
def mouseEvent(type, posx, posy):
theEvent = CGEventCreateMouseEvent(None, type, (posx,posy), kCGMouseButtonLeft)
CGEventPost(kCGHIDEventTap, theEvent)
def mousemove(posx,posy):
print 'mousemove...'
mouseEvent(kCGEventMouseMoved, posx,posy)
def mouseclick(posx,posy):
print 'mouseclick'
mouseEvent(kCGEventLeftMouseDown, posx,posy)
mouseEvent(kCGEventLeftMouseUp, posx,posy)
def pushZ():
print 'pushz'
cmd = """
osascript -e 'tell application "System Events" to keystroke "z"'
"""
return cmd
def pushDownKey():
print 'pushdownkey'
cmd = """
osascript -e 'tell application "System Events" to key code 125'
"""
return cmd
def pushArrowKey(key):
print 'pusharrowkey:'+str(key)
cmd = """
osascript -e 'tell application "System Events" to key code """+str(key)+""" '
"""
return cmd
def randPushZ():
num = random.randint(1, 7)
time.sleep(num)
os.system(pushZ())
def afkMouseMove():
mouseclick(64, 400)
mousemove(100, 200)
time.sleep(1)
mousemove(123, 212)
time.sleep(1)
mousemove(72, 150)
mouseclick(72, 152)
def main():
# cmd = """
# osascript -e 'tell application "System Events" to key code 125'
# """
# os.system(cmd)
afkcount = 0;
afkrand = random.randint(1, 300)
time.sleep(3)
print 'started...'
print 'afkrand:'+str(afkrand)
while 1 == 1:
pushArrowKey(RIGHT_ARROW)
pushArrowKey(RIGHT_ARROW)
os.system(pushZ())
os.system(pushZ())
# os.system(pushDownKey())
# os.system(pushDownKey())
# os.system(pushDownKey())
# os.system(pushDownKey())
os.system(pushArrowKey(DOWN_ARROW))
os.system(pushArrowKey(DOWN_ARROW))
os.system(pushArrowKey(DOWN_ARROW))
os.system(pushArrowKey(DOWN_ARROW))
randPushZ()
randPushZ()
randPushZ()
afkcount += 1
print str(afkcount)
if (afkcount >= afkrand):
print 'afk test'
afkcount = 0
afkrand = random.randint(1, 300)
afkMouseMove()
pushArrowKey(LEFT_ARROW)
time.sleep(1)
pushArrowKey(LEFT_ARROW)
time.sleep(1)
pushArrowKey(LEFT_ARROW)
time.sleep(1)
pushArrowKey(UP_ARROW)
time.sleep(1)
pushArrowKey(UP_ARROW)
time.sleep(1)
pushArrowKey(DOWN_ARROW)
time.sleep(1)
pushArrowKey(DOWN_ARROW)
time.sleep(1)
pushArrowKey(RIGHT_ARROW)
time.sleep(1)
pushArrowKey(RIGHT_ARROW)
if __name__ == '__main__':
main()
|
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.build import MODEL_REGISTRY
from models.weight_init_helper import init_weights
from utils.models import pad_if_necessary
from utils.models import pad_if_necessary_all
from models.Unet3D import Encoder as Unet3DEncoder, Decoder as Unet3DDecoder, SegHead as Unet3DSegHead
from models.Unet3D import Unet3D, BasicBlock, ContextBlock, is_3d
from models.NestedUnet3D import ModulationBlock
IS_3D = True
class MLPModule(nn.Sequential):
def __init__(self, in_channels, out_channels, norm_type='batch'):
if norm_type == 'batch':
norm_layer = nn.BatchNorm1d(num_features=out_channels, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = nn.InstanceNorm1d(num_features=out_channels, affine=True, track_running_stats=True)
elif norm_type == 'layer':
            norm_layer = nn.LayerNorm(normalized_shape=out_channels, elementwise_affine=True)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError()
        # nn.Sequential cannot take None, so only include a norm layer when one was configured
        layers = [nn.Linear(in_channels, out_channels)]
        if norm_layer is not None:
            layers.append(norm_layer)
        layers += [nn.ReLU(), nn.Linear(out_channels, in_channels)]
        super().__init__(*layers)
class ChannelAttentionModule(nn.Module):
def __init__(self, gate_channels, self_attention_attr):
super().__init__()
self.gate_channels = gate_channels
self.pooling_type = self_attention_attr.CHANNEL_POOLING_TYPE
assert self.pooling_type in ('max', 'average', 'pa', 'lse'), 'p is undefined'
self.reduction_ratio = self_attention_attr.REDUCTION_RATIO
self.norm_type = self_attention_attr.NORM_TYPE
assert self.norm_type in ('batch', 'instance', 'layer', 'none')
self._create_net()
def _create_net(self):
self.mlp_layer = MLPModule(
in_channels=self.gate_channels,
out_channels=self.gate_channels // self.reduction_ratio,
norm_type=self.norm_type,
)
def forward_pooling(self, pooling_type, x):
B, C, D, H, W = x.shape
if pooling_type == 'max':
x = F.max_pool3d(x, kernel_size=(D, H, W))
elif pooling_type == 'average':
x = F.avg_pool3d(x, kernel_size=(D, H, W))
elif pooling_type == 'pa':
x = x.view(B * C, -1)
x = F.lp_pool1d(x, 2, kernel_size=(D * H * W))
pass
elif pooling_type == 'lse':
x = x.view(B, C, -1)
x = torch.logsumexp(x, dim=-1, keepdim=True)
pass
else:
raise NotImplementedError()
x = x.view(B, -1)
x = self.mlp_layer(x)
return x
def forward(self, x):
B, C, D, H, W = x.shape
x_out = self.forward_pooling(self.pooling_type, x)
x_out = x_out.view(B, C, 1, 1, 1)
x_out = x_out.expand_as(x)
return x_out
class SpatialAttentionModule(nn.Module):
def __init__(self, gate_channels, self_attention_attr):
super().__init__()
self.gate_channels = gate_channels
self.kernel_size = tuple([self_attention_attr.SPATIAL_KERNEL_SIZE] * 3)
self.num_conv_blocks = self_attention_attr.NUM_CONV_BLOCKS
self.dilation = tuple([self_attention_attr.DILATION] * 3)
self.reduction_ratio = self_attention_attr.REDUCTION_RATIO
self._create_net(self_attention_attr)
@staticmethod
def get_layer_name(i, postfix=''):
return 'bam_sam_layer{:03}{}'.format(i, postfix)
def _create_net(self, self_attention_attr):
in_channels, out_channels = self.gate_channels, self.gate_channels // self.reduction_ratio
self.conv_layers = nn.Sequential()
self.conv_layers.add_module(
'base',
BasicBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=(1, 1, 1),
)
)
for i in range(self.num_conv_blocks):
self.conv_layers.add_module(
self.get_layer_name(i),
BasicBlock(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.kernel_size,
dilation=self.dilation,
)
)
self.conv_layers.add_module(
'last',
nn.Conv3d(in_channels=out_channels, out_channels=1, kernel_size=(1, 1, 1), bias=False)
)
def forward(self, x):
x_out = self.conv_layers(x)
x_out = x_out.expand_as(x)
return x_out
class ModulationAggregationBlock(nn.Module):
def __init__(self, gate_channels, modulation_type, concat_reduction_factor=2, conv_only=True):
super().__init__()
self.gate_channels = gate_channels
self.modulation_type = modulation_type
self.concat_reduction_factor = concat_reduction_factor
self.conv_only = conv_only
self._create_net()
def _create_conv(self, in_channels, out_channels):
if self.conv_only:
return nn.Conv3d(in_channels, out_channels, kernel_size=(1, 1, 1), bias=False)
else:
return BasicBlock(in_channels, out_channels, kernel_size=(1, 1, 1))
def _create_concat_squeeze(self):
in_channels, out_channels = self.concat_reduction_factor * self.gate_channels, self.gate_channels
return self._create_conv(in_channels, out_channels)
def _create_net(self):
self.attention_modulation = ModulationBlock(self.modulation_type)
if self.modulation_type == 'concatenation':
self.concat_reduc_conv_layer = self._create_concat_squeeze()
def forward(self, x):
x = self.attention_modulation(x)
if self.modulation_type == 'concatenation':
x = self.concat_reduc_conv_layer(x)
return x
class GAMBlock(nn.Module):
def __init__(self, gate_channels, self_attention_attr):
super().__init__()
self.gate_channels = gate_channels
self.channel, self.spatial = self_attention_attr.CHANNEL, self_attention_attr.SPATIAL
self.cm_modulation_type = self_attention_attr.CROSS_MODAL_MODULATION_TYPE
self.ref_modulation_type = self_attention_attr.REF_MODULATION_TYPE
self.residual = self_attention_attr.RESIDUAL
assert self.channel or self.spatial, 'either modalities must be on'
self._create_net(self_attention_attr)
def _create_net(self, self_attention_attr):
if self.channel:
self.channel_attention_layer = ChannelAttentionModule(self.gate_channels, self_attention_attr)
if self.spatial:
self.spatial_attention_layer = SpatialAttentionModule(self.gate_channels, self_attention_attr)
if self.channel is self.spatial is True:
self.cm_att_mod_agg = ModulationAggregationBlock(self.gate_channels, self.cm_modulation_type)
self.attention_normalization = nn.Sigmoid()
self.ref_att_mod_agg = ModulationAggregationBlock(self.gate_channels, self.ref_modulation_type)
def forward(self, x):
x_channel_attention_map, x_spatial_attention_map, x_attention_map = None, None, None
if self.channel:
x_channel_attention_map = x_attention_map = self.channel_attention_layer(x)
if self.spatial:
x_spatial_attention_map = x_attention_map = self.spatial_attention_layer(x)
if self.channel is self.spatial is True:
x_attention_map = self.cm_att_mod_agg((x_channel_attention_map, x_spatial_attention_map))
x_attention_map = self.attention_normalization(x_attention_map)
x_attention_map = self.ref_att_mod_agg((x, x_attention_map))
x = (x + x_attention_map) if self.residual else x
return x
|
# Functions and variables
# 1 - Shopping list
carrinho = []
# 2 - Function to add items to the cart
def adiciona_item(valor):
    carrinho.append(valor)
# 3 - Function to total the cart
def total_carrinho(lista_compras):
    soma = 0
    for item in lista_compras:
        soma += item
    return soma
|
import sys, logging
import subprocess
import json
import binascii
import numpy as np
from bitcoin import SelectParams
from bitcoin.core import b2x
from bitcoin.core.script import *
from bitcoin.wallet import *
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s %(levelname)-2s %(name)-2s %(module)s@%(lineno)d.%(funcName)s %(message)s")
class BitcoinTx():
# bitcoin-tx [options] <hex-tx> [commands] Update hex-encoded bitcoin transaction
# bitcoin-tx [options] -create [commands] Create hex-encoded bitcoin transaction
@staticmethod
def __execute_and_get_output(commands):
commands = [c.decode("utf-8") if not isinstance(c, str) else c for c in commands]
logging.getLogger("BitcoinTx").debug("executing: %s" % ' '.join(commands))
return subprocess.check_output(commands)[:-1] # Remove last character '\n'
@staticmethod
def get_nsequence(number_of, day_based = False):
'''
Bit (1 << 22) determines if the relative lock-time is time-based or block based:
If the bit is set, the relative lock-time specifies a timespan in units of 512 seconds granularity.
The timespan starts from the median-time-past of the output's previous block,
and ends at the MTP of the previous block.
If the bit is not set, the relative lock-time specifies a number of blocks.
'''
# nSequence is a 32bit string
nSequence = 0
if day_based:
nSequence = nSequence | (1 << 22);
number_of_seconds = number_of*24*60*60;
            timespan = number_of_seconds // 512  # integer division so the bitwise OR below gets an int
nSequence = np.uint32(nSequence | timespan);
else:
nSequence = np.uint32(nSequence | number_of);
logging.getLogger("BitcoinTx").debug("number_of: %i, day_based: %s, nSequence: %i \n" % (number_of, day_based, nSequence));
return nSequence;
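    # Worked example of the encoding described above: a 3-day relative lock
    # sets the time-based flag and encodes 3*24*60*60 // 512 == 506 units,
    # i.e. nSequence == (1 << 22) | 506 == 4194810, whereas a 10-block lock
    # is simply nSequence == 10.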
def __init__(self, chain="regtest"):
self.logger = logging.getLogger("BitcoinTx")
self.bitcointx = ["bitcoin-tx"]
self.bitcoincli = ["bitcoin-cli"]
if chain=="regtest":
SelectParams('regtest')
self.bitcointx += ["-regtest"]
self.bitcoincli += ["-regtest"]
elif chain=="testnet":
SelectParams('testnet')
self.bitcointx += ["-testnet"]
self.bitcoincli += ["-testnet"]
elif chain=="mainnet":
SelectParams('mainnet')
pass
else:
raise ValueError("please specify a chain using mainnet, testnet or regtest (default)")
commands = self.bitcointx + ['-create']
self.hex = BitcoinTx.__execute_and_get_output(commands)
def json(self):
return json.loads(self.__str__())
def __str__(self):
commands = self.bitcointx + ['-json', self.hex]
return BitcoinTx.__execute_and_get_output(commands).decode("utf-8")
def txid(self):
return self.json()['txid']
    def delete_in(self, input_index):
        # delin=N
        # Delete input N from TX
        commands = self.bitcointx + [self.hex, "delin=" + str(input_index)]
        self.hex = BitcoinTx.__execute_and_get_output(commands)
        return self.hex
def add_input(self, txid, vout, sequence_number):
# bitcoin-tx [options] <hex-tx> [commands] Update hex-encoded bitcoin transaction
# in=TXID:VOUT(:SEQUENCE_NUMBER)
# Add input to TX
commands = self.bitcointx + [self.hex, "in="+txid+":"+str(vout)+":"+str(sequence_number)]
self.hex = BitcoinTx.__execute_and_get_output(commands)
return self.hex
def add_output_address(self, value, address):
# outaddr=VALUE:ADDRESS
# Add address-based output to TX
commands = self.bitcointx + [self.hex, "outaddr="+str(value)+":"+address]
self.hex = BitcoinTx.__execute_and_get_output(commands)
return self.hex
def add_output_redeem_script(self, value, script):
scriptPubKey = CScript(script).to_p2sh_scriptPubKey()
p2sh_address = CBitcoinAddress.from_scriptPubKey(scriptPubKey)
return self.add_output_address(value, str(p2sh_address))
# OP_RETURN
def add_output_data(self, data):
# outdata=[VALUE:]DATA
# Add data-based output to TX
# decode with binascii.unhexlify('6120736f726574')
datahex = binascii.hexlify( data.encode('utf-8') ).decode('utf-8')
commands = self.bitcointx + [self.hex, "outdata="+datahex]
self.hex = BitcoinTx.__execute_and_get_output(commands)
return self.hex
def sign(self):
self.logger.warning("TODO: use bitcoinrpc")
commands = self.bitcoincli + ["signrawtransaction", self.hex]
self.hex = json.loads(BitcoinTx.__execute_and_get_output(commands).decode('utf-8'))["hex"]
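# Hypothetical usage sketch (requires bitcoin-tx/bitcoin-cli on the PATH and a
# funded regtest wallet; the txid, output index and address are placeholders):
#   tx = BitcoinTx(chain="regtest")
#   tx.add_input("<txid>", 0, BitcoinTx.get_nsequence(10))  # 10-block relative lock
#   tx.add_output_address(0.1, "<destination-address>")
#   tx.add_output_data("hello")                             # OP_RETURN payload
#   tx.sign()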
|
from __future__ import division, print_function
import abc
import logging
import json
import math
import mimetypes
import multiprocessing
import multiprocessing.pool
import os
import os.path as osp
import subprocess
import sys
import tempfile
import six
import numpy
import sklearn.cluster
from smqtk.algorithms.descriptor_generator import DescriptorGenerator
from smqtk.representation.data_element.file_element import DataFileElement
from smqtk.utils import file_utils, SimpleTimer, video_utils
from smqtk.utils.string_utils import partition_string
from smqtk.utils.video_utils import get_metadata_info
# Attempt importing utilities module. If not, flag descriptor as unusable.
from . import utils
from six import next
# Requires FLANN bindings
try:
import pyflann
except ImportError:
pyflann = None
# noinspection PyPep8Naming
class ColorDescriptor_Base (DescriptorGenerator):
"""
Simple implementation of ColorDescriptor feature descriptor utility for
feature generation over images and videos.
This was started as an attempt at gaining a deeper understanding of what's
going on with this feature descriptor's use and how it applied to later use
in an indexer.
Codebook generated via kmeans given a set of input data. FLANN index model
used for quantization, built using auto-tuning (picks the best indexing
algorithm of linear, kdtree, kmeans, or combined), and using the Chi-Squared
distance function.
"""
# Name/Path to the colorDescriptor executable to use. By default we assume
# its accessible on the PATH.
EXE = 'colorDescriptor'
@classmethod
def is_usable(cls):
"""
Check whether this descriptor is available for use.
:return: Boolean determination of whether this implementation is usable.
:rtype: bool
"""
log = logging.getLogger('.'.join([ColorDescriptor_Base.__module__,
ColorDescriptor_Base.__name__,
"is_usable"]))
if not hasattr(ColorDescriptor_Base, "_is_usable_cache"):
# Base assumption that it is available, now lets prove its false.
ColorDescriptor_Base._is_usable_cache = True
if pyflann is None:
# missing FLANN bindings dependency
log.debug("could not import FLANN bindings (pyflann)")
ColorDescriptor_Base._is_usable_cache = False
else:
# Check for colorDescriptor executable on the path
log_file = open(tempfile.mkstemp()[1], 'w')
try:
                    # Calling with '--version' should print the CLI options
                    # and return with code 1.
subprocess.call([cls.EXE, '--version'],
stdout=log_file, stderr=log_file)
# it is known that colorDescriptor has a return code of 1 no
# matter if it exited "successfully" or not, which is not
# helpful, I know.
except OSError:
log.debug("Could not locate colorDescriptor executable. "
"Make sure that its on the PATH! See "
"smqtk/descriptor_generator/colordescriptor/"
"INSTALL.md for help.")
# If there was anything written to the log file, output it.
log_file.flush()
if log_file.tell():
with open(log_file.name) as f:
log.debug("STDOUT and STDERR output from attempted "
"colorDescriptor call:\n%s", f.read())
ColorDescriptor_Base._is_usable_cache = False
finally:
log_file.close()
os.remove(log_file.name)
# Checking if DescriptorIO is importable
if not utils.has_colordescriptor_module():
log.debug("Could not import DescriptorIO. Make sure that "
"the colorDescriptor package is on the "
"PYTHONPATH! See "
"smqtk/descriptor_generator/colordescriptor/"
"INSTALL.md for help.")
ColorDescriptor_Base._is_usable_cache = False
return ColorDescriptor_Base._is_usable_cache
def __init__(self, model_directory, work_directory,
model_gen_descriptor_limit=1000000,
kmeans_k=1024, flann_distance_metric='hik',
flann_target_precision=0.95,
flann_sample_fraction=0.75,
flann_autotune=False,
random_seed=None, use_spatial_pyramid=False,
parallel=None):
"""
Initialize a new ColorDescriptor interface instance.
:param model_directory: Path to the directory to store/read data model
files on the local filesystem. Relative paths are treated relative
to the current working directory.
:type model_directory: str | unicode
:param work_directory: Path to the directory in which to place
temporary/working files. Relative paths are treated relative to
the current working directory.
:type work_directory: str | unicode
:param model_gen_descriptor_limit: Total number of descriptors to use
from input data to generate codebook model. Fewer than this may be
used if the data set is small, but if it is greater, we randomly
sample down to this count (occurs on a per element basis).
:type model_gen_descriptor_limit: int
:param kmeans_k: Centroids to generate. Default of 1024
:type kmeans_k: int
:param flann_distance_metric: Distance function to use in FLANN
indexing. See FLANN documentation for available distance function
types (under the MATLAB section reference for valid string
identifiers)
:type flann_distance_metric: str
:param flann_target_precision: Target precision percent to tune index
for. Default is 0.95 (95% accuracy). For some codebooks, if this is
too close to 1.0, the FLANN library may non-deterministically
overflow, causing an infinite loop requiring a SIGKILL to stop.
:type flann_target_precision: float
:param flann_sample_fraction: Fraction of input data to use for index
auto tuning. Default is 0.75 (75%).
:type flann_sample_fraction: float
:param flann_autotune: Have FLANN module use auto-tuning algorithm to
find an optimal index representation and parameter set.
:type flann_autotune: bool
:param random_seed: Optional value to seed components requiring random
operations.
:type random_seed: None or int
:param use_spatial_pyramid: Use spatial pyramids when quantizing low
level descriptors during feature computation.
:type use_spatial_pyramid: bool
:param parallel: Specific number of threads/cores to use when performing
asynchronous activities. When None we will use all cores available.
:type parallel: int | None
"""
super(ColorDescriptor_Base, self).__init__()
# TODO: Because of the FLANN library non-deterministic overflow issue,
# an alternative must be found before this can be put into
# production. Suggest saving/using sk-learn MBKMeans class? Can
# the class be regenerated from an existing codebook?
self._model_dir = model_directory
self._work_dir = work_directory
self._model_gen_descriptor_limit = model_gen_descriptor_limit
self._kmeans_k = int(kmeans_k)
self._flann_distance_metric = flann_distance_metric
self._flann_target_precision = float(flann_target_precision)
self._flann_sample_fraction = float(flann_sample_fraction)
self._flann_autotune = bool(flann_autotune)
self._use_sp = use_spatial_pyramid
self._rand_seed = None if random_seed is None else int(random_seed)
if self._rand_seed is not None:
numpy.random.seed(self._rand_seed)
self.parallel = parallel
# Cannot pre-load FLANN stuff because odd things happen when processing/
# threading. Loading index file is fast anyway.
self._codebook = None
if self.has_model:
self._codebook = numpy.load(self.codebook_filepath)
def get_config(self):
"""
Return a JSON-compliant dictionary that could be passed to this class's
``from_config`` method to produce an instance with identical
configuration.
Keys of the returned dictionary are based on the initialization argument
names.
:return: JSON type compliant configuration dictionary.
:rtype: dict
"""
return {
"model_directory": self._model_dir,
"work_directory": self._work_dir,
"model_gen_descriptor_limit": self._model_gen_descriptor_limit,
"kmeans_k": self._kmeans_k,
"flann_distance_metric": self._flann_distance_metric,
"flann_target_precision": self._flann_target_precision,
"flann_sample_fraction": self._flann_sample_fraction,
"flann_autotune": self._flann_autotune,
"random_seed": self._rand_seed,
"use_spatial_pyramid": self._use_sp,
}
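# Illustrative configuration round-trip (hypothetical paths; ``from_config``
# is the counterpart method referenced in the docstring above):
#   d = ColorDescriptor_Image_csift('/data/models', '/data/work')
#   clone = ColorDescriptor_Image_csift.from_config(d.get_config())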
@property
def name(self):
if self._use_sp:
return '_'.join([self.__class__.__name__, 'spatial'])
else:
return self.__class__.__name__
@property
def codebook_filepath(self):
file_utils.safe_create_dir(self._model_dir)
return osp.join(self._model_dir,
"%s.codebook.npy" % (self.descriptor_type(),))
@property
def flann_index_filepath(self):
file_utils.safe_create_dir(self._model_dir)
return osp.join(self._model_dir,
"%s.flann_index.dat" % (self.descriptor_type(),))
@property
def flann_params_filepath(self):
file_utils.safe_create_dir(self._model_dir)
return osp.join(self._model_dir,
"%s.flann_params.json" % (self.descriptor_type(),))
@property
def has_model(self):
has_model = (osp.isfile(self.codebook_filepath) and
osp.isfile(self.flann_index_filepath))
# Load the codebook model if not already loaded. FLANN index will be
# loaded when needed to prevent thread/subprocess memory issues.
if self._codebook is None and has_model:
self._codebook = numpy.load(self.codebook_filepath)
return has_model
@property
def temp_dir(self):
return file_utils.safe_create_dir(osp.join(self._work_dir,
'temp_files'))
@abc.abstractmethod
def descriptor_type(self):
"""
:return: String descriptor type as used by colorDescriptor
:rtype: str
"""
return
@abc.abstractmethod
def _generate_descriptor_matrices(self, data_set, **kwargs):
"""
Generate info and descriptor matrices based on ingest type.
:param data_set: Iterable of data elements to generate combined info
and descriptor matrices for.
:type data_set: collections.Set[smqtk.representation.DataElement]
:param limit: Limit the number of descriptor entries to this amount.
:type limit: int
:return: Combined info and descriptor matrices for all base images
:rtype: (numpy.core.multiarray.ndarray, numpy.core.multiarray.ndarray)
"""
pass
def _get_checkpoint_dir(self, data):
"""
The directory that contains checkpoint material for a given data element
:param data: Data element
:type data: smqtk.representation.DataElement
:return: directory path
:rtype: str
"""
d = osp.join(self._work_dir, *partition_string(str(data.uuid()), 10))
file_utils.safe_create_dir(d)
return d
def _get_standard_info_descriptors_filepath(self, data, frame=None):
"""
Get the standard path to a data element's computed descriptor output,
which for colorDescriptor consists of two matrices: info and descriptors
:param data: Data element
:type data: smqtk.representation.DataElement
:param frame: frame within the data file
:type frame: int
:return: Paths to info and descriptor checkpoint numpy files
:rtype: (str, str)
"""
d = self._get_checkpoint_dir(data)
if frame is not None:
return (
osp.join(d, "%s.info.%d.npy" % (str(data.uuid()), frame)),
osp.join(d, "%s.descriptors.%d.npy" % (str(data.uuid()), frame))
)
else:
return (
osp.join(d, "%s.info.npy" % str(data.uuid())),
osp.join(d, "%s.descriptors.npy" % str(data.uuid()))
)
def _get_checkpoint_feature_file(self, data):
"""
Return the standard path to a data element's computed feature checkpoint
file relative to our current working directory.
:param data: Data element
:type data: smqtk.representation.DataElement
:return: Standard path to the feature checkpoint file for this given
data element.
:rtype: str
"""
if self._use_sp:
return osp.join(self._get_checkpoint_dir(data),
"%s.feature.sp.npy" % str(data.uuid()))
else:
return osp.join(self._get_checkpoint_dir(data),
"%s.feature.npy" % str(data.uuid()))
def generate_model(self, data_set):
"""
Generate this feature detector's data-model given a file ingest. This
saves the generated model to the currently configured data directory.
For colorDescriptor, we generate raw features over the ingest data,
compute a codebook via kmeans, and then create an index with FLANN via
the "autotune" or linear algorithm to intelligently pick the fastest
indexing method.
:param data_set: Set of input data elements to generate the model
with.
:type data_set: collections.Set[smqtk.representation.DataElement]
"""
if self.has_model:
self._log.warn("ColorDescriptor model for descriptor type '%s' "
"already generated!", self.descriptor_type())
return
# Check that input data is valid for processing through colorDescriptor
valid_types = self.valid_content_types()
invalid_types_found = set()
for di in data_set:
if di.content_type() not in valid_types:
invalid_types_found.add(di.content_type())
if invalid_types_found:
self._log.error("Found one or more invalid content types among "
"input:")
for t in sorted(invalid_types_found):
self._log.error("\t- '%s", t)
raise ValueError("Discovered invalid content type among input "
"data: %s" % sorted(invalid_types_found))
if not osp.isfile(self.codebook_filepath):
self._log.info("Did not find existing ColorDescriptor codebook for "
"descriptor '%s'.", self.descriptor_type())
# generate descriptors
with SimpleTimer("Generating descriptor matrices...",
self._log.info):
descriptors_checkpoint = osp.join(self._work_dir,
"model_descriptors.npy")
if osp.isfile(descriptors_checkpoint):
self._log.debug("Found existing computed descriptors work "
"file for model generation.")
descriptors = numpy.load(descriptors_checkpoint)
else:
self._log.debug("Computing model descriptors")
_, descriptors = \
self._generate_descriptor_matrices(
data_set,
limit=self._model_gen_descriptor_limit
)
_, tmp = tempfile.mkstemp(dir=self._work_dir,
suffix='.npy')
self._log.debug("Saving model-gen info/descriptor matrix")
numpy.save(tmp, descriptors)
os.rename(tmp, descriptors_checkpoint)
# Compute centroids (codebook) with kmeans
with SimpleTimer("Computing sklearn.cluster.MiniBatchKMeans...",
self._log.info):
kmeans_verbose = self._log.getEffectiveLevel() <= logging.DEBUG
kmeans = sklearn.cluster.MiniBatchKMeans(
n_clusters=self._kmeans_k,
init_size=self._kmeans_k*3,
random_state=self._rand_seed,
verbose=kmeans_verbose,
compute_labels=False,
)
kmeans.fit(descriptors)
codebook = kmeans.cluster_centers_
with SimpleTimer("Saving generated codebook...", self._log.debug):
numpy.save(self.codebook_filepath, codebook)
else:
self._log.info("Found existing codebook file.")
codebook = numpy.load(self.codebook_filepath)
# create FLANN index
# - autotune will force select linear search if there are < 1000 words
# in the codebook vocabulary.
pyflann.set_distance_type(self._flann_distance_metric)
flann = pyflann.FLANN()
if self._log.getEffectiveLevel() <= logging.DEBUG:
log_level = 'info'
else:
log_level = 'warning'
with SimpleTimer("Building FLANN index...", self._log.info):
p = {
"target_precision": self._flann_target_precision,
"sample_fraction": self._flann_sample_fraction,
"log_level": log_level,
}
if self._flann_autotune:
p['algorithm'] = "autotuned"
if self._rand_seed is not None:
p['random_seed'] = self._rand_seed
flann_params = flann.build_index(codebook, **p)
with SimpleTimer("Saving FLANN index to file...", self._log.debug):
# Save FLANN index data binary
flann.save_index(self.flann_index_filepath)
# Save out log of parameters
with open(self.flann_params_filepath, 'w') as ofile:
json.dump(flann_params, ofile, indent=4, sort_keys=True)
# save generation results to class for immediate feature computation use
self._codebook = codebook
def _compute_descriptor(self, data):
"""
Given some kind of data, process and return a feature vector as a Numpy
array.
:raises RuntimeError: Feature extraction failure of some kind.
:param data: Some kind of input data for the feature descriptor. This is
descriptor dependent.
:type data: smqtk.representation.DataElement
:return: Feature vector. This is a histogram of N bins where N is the
number of centroids in the codebook. Bin values is percent
composition, not absolute counts.
:rtype: numpy.ndarray
"""
super(ColorDescriptor_Base, self)._compute_descriptor(data)
checkpoint_filepath = self._get_checkpoint_feature_file(data)
# if osp.isfile(checkpoint_filepath):
# return numpy.load(checkpoint_filepath)
if not self.has_model:
raise RuntimeError("No model currently loaded! Check the existence "
"or, or generate, model files!\n"
"Codebook path: %s\n"
"FLANN Index path: %s"
% (self.codebook_filepath,
self.flann_index_filepath))
self._log.debug("Computing descriptors for data UID[%s]...",
data.uuid())
info, descriptors = self._generate_descriptor_matrices({data})
# Load FLANN components
pyflann.set_distance_type(self._flann_distance_metric)
flann = pyflann.FLANN()
flann.load_index(self.flann_index_filepath, self._codebook)
if not self._use_sp:
###
# Codebook Quantization
#
# - loaded the model at class initialization if we had one
self._log.debug("Quantizing descriptors")
try:
# If the distance method is HIK, we need to treat it special
# since that method produces a similarity score, not a distance
# score.
#
if self._flann_distance_metric == 'hik':
# This searches for all NN instead of minimum between n and
# the number of descriptors and keeps the last one because
# hik is a similarity score and not a distance, which is
# also why the values in dists is flipped below.
#: :type: (numpy.ndarray, numpy.ndarray)
idxs = flann.nn_index(descriptors,
self._codebook.shape[0])[0]
# Only keep the last index for each descriptor return
idxs = numpy.array([i_array[-1] for i_array in idxs])
else:
#: :type: (numpy.ndarray, numpy.ndarray)
idxs = flann.nn_index(descriptors, 1)[0]
except AssertionError:
self._log.error("Codebook shape : %s", self._codebook.shape)
self._log.error("Descriptor shape: %s", descriptors.shape)
raise
# Create histogram
# - Using explicit bin slots to prevent numpy from automatically
# creating tightly constrained bins. This would otherwise cause
# histograms between two inputs to be non-comparable (unaligned
# bins).
# - See numpy note about ``bins`` to understand why the +1 is
# necessary
# - Learned from spatial implementation that we could feed multiple
# neighbors per descriptor into here, leading to a more populated
# histogram.
# - Could also possibly weight things based on dist from
# descriptor?
#: :type: numpy.core.multiarray.ndarray
h = numpy.histogram(idxs, # indices are all integers
bins=numpy.arange(self._codebook.shape[0]+1))[0]
# self._log.debug("Quantization histogram: %s", h)
# Normalize histogram into relative frequencies
# - Not using /= on purpose. h is originally int32 coming out of
# histogram. /= would keep int32 type when we want it to be
# transformed into a float type by the division.
if h.sum():
# noinspection PyAugmentAssignment
h = h / float(h.sum())
else:
h = numpy.zeros(h.shape, h.dtype)
# self._log.debug("Normalized histogram: %s", h)
else:
###
# Spatial Pyramid Quantization
#
self._log.debug("Quantizing descriptors using spatial pyramid")
##
# Quantization factor - number of nearest codes to be saved
q_factor = 10
##
# Concatenating spatial information onto descriptor vectors in the format:
# [ x y <descriptor> ]
self._log.debug("Creating combined descriptor matrix")
m = numpy.concatenate((info[:, :2],
descriptors), axis=1)
##
# Creating quantized vectors, each of the form:
# [ x y c_1 ... c_qf dist_1 ... dist_qf ]
# which has a total size of 2+(qf*2)
#
# Sangmin's code included the distances in the quantized vector, but
# then also passed this vector into numpy's histogram function with
# integral bins, causing the [0,1] to be heavily populated, which
# doesn't make sense to do.
# idxs, dists = flann.nn_index(m[:, 2:], q_factor)
# q = numpy.concatenate([m[:, :2], idxs, dists], axis=1)
self._log.debug("Computing nearest neighbors")
if self._flann_distance_metric == 'hik':
# Query full ordering of code indices
idxs = flann.nn_index(m[:, 2:], self._codebook.shape[0])[0]
# Extract the right-side block for use in building histogram
# Order doesn't actually matter in the current implementation
# because index relative position is not being weighted.
idxs = idxs[:, -q_factor:]
else:
idxs = flann.nn_index(m[:, 2:], q_factor)[0]
self._log.debug("Creating quantization matrix")
# This matrix consists of descriptor (x,y) position + near code
# indices.
q = numpy.concatenate([m[:, :2], idxs], axis=1)
##
# Build spatial pyramid from quantized matrix
self._log.debug("Building spatial pyramid histograms")
hist_sp = self._build_sp_hist(q, self._codebook.shape[0])
##
# Combine each quadrants into single vector
self._log.debug("Combining global+thirds into final histogram.")
f = sys.float_info.min  # so we don't accidentally divide by zero
def rf_norm(hist):
return hist / (float(hist.sum()) + f)
h = numpy.concatenate([rf_norm(hist_sp[0]),
rf_norm(hist_sp[5]),
rf_norm(hist_sp[6]),
rf_norm(hist_sp[7])],
axis=1)
# noinspection PyAugmentAssignment
h /= h.sum()
self._log.debug("Saving checkpoint feature file")
if not osp.isdir(osp.dirname(checkpoint_filepath)):
file_utils.safe_create_dir(osp.dirname(checkpoint_filepath))
numpy.save(checkpoint_filepath, h)
return h
@staticmethod
def _build_sp_hist(feas, bins):
"""
Build spatial pyramid from quantized data. We expect feature matrix
to be in the following format:
[[ x y c_1 ... c_n ]
[ ... ]
... ]
NOTES:
- See encode_FLANN.py for original implementation this was adapted
from.
:param feas: Feature matrix with the above format.
:type feas: numpy.core.multiarray.ndarray
:param bins: number of bins for the spatial histograms. This should
probably be the size of the codebook used when generating quantized
descriptors.
:type bins: int
:return: Matrix of 8 rows representing the histograms for the different
spatial regions
:rtype: numpy.core.multiarray.ndarray
"""
bins = numpy.arange(0, bins+1)
cordx = feas[:, 0]
cordy = feas[:, 1]
feas = feas[:, 2:]
# hard quantization
# global histogram
#: :type: numpy.core.multiarray.ndarray
hist_sp_g = numpy.histogram(feas, bins=bins)[0]
hist_sp_g = hist_sp_g[numpy.newaxis]
# 4 quadrants
# noinspection PyTypeChecker
midx = numpy.ceil(cordx.max()/2)
# noinspection PyTypeChecker
midy = numpy.ceil(cordy.max()/2)
lx = cordx < midx
rx = cordx >= midx
uy = cordy < midy
dy = cordy >= midy
# logging.error("LXUI: %s,%s", lx.__repr__(), uy.__repr__())
# logging.error("Length LXUI: %s,%s", lx.shape, uy.shape)
# logging.error("feas dimensions: %s", feas.shape)
#: :type: numpy.core.multiarray.ndarray
hist_sp_q1 = numpy.histogram(feas[lx & uy], bins=bins)[0]
#: :type: numpy.core.multiarray.ndarray
hist_sp_q2 = numpy.histogram(feas[rx & uy], bins=bins)[0]
#: :type: numpy.core.multiarray.ndarray
hist_sp_q3 = numpy.histogram(feas[lx & dy], bins=bins)[0]
#: :type: numpy.core.multiarray.ndarray
hist_sp_q4 = numpy.histogram(feas[rx & dy], bins=bins)[0]
hist_sp_q1 = hist_sp_q1[numpy.newaxis]
hist_sp_q2 = hist_sp_q2[numpy.newaxis]
hist_sp_q3 = hist_sp_q3[numpy.newaxis]
hist_sp_q4 = hist_sp_q4[numpy.newaxis]
# 3 layers
# noinspection PyTypeChecker
ythird = numpy.ceil(cordy.max()/3)
l1 = cordy <= ythird
l2 = (cordy > ythird) & (cordy <= 2*ythird)
l3 = cordy > 2*ythird
#: :type: numpy.core.multiarray.ndarray
hist_sp_l1 = numpy.histogram(feas[l1], bins=bins)[0]
#: :type: numpy.core.multiarray.ndarray
hist_sp_l2 = numpy.histogram(feas[l2], bins=bins)[0]
#: :type: numpy.core.multiarray.ndarray
hist_sp_l3 = numpy.histogram(feas[l3], bins=bins)[0]
hist_sp_l1 = hist_sp_l1[numpy.newaxis]
hist_sp_l2 = hist_sp_l2[numpy.newaxis]
hist_sp_l3 = hist_sp_l3[numpy.newaxis]
# concatenate
hist_sp = numpy.vstack((hist_sp_g, hist_sp_q1, hist_sp_q2,
hist_sp_q3, hist_sp_q4, hist_sp_l1,
hist_sp_l2, hist_sp_l3))
return hist_sp
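# Illustrative note: for a codebook of size k, the returned matrix has shape
# (8, k) -- row 0 is the global histogram, rows 1-4 the four quadrants, and
# rows 5-7 the three horizontal bands.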
def _get_data_temp_path(self, de):
"""
Standard method of generating/getting a data element's temporary file
path.
:param de: DataElement instance to generate/get temporary file path.
:type de: smqtk.representation.DataElement
:return: Path to the element's temporary file.
:rtype: str
"""
temp_dir = None
# Shortcut when data element is from file, since we are not going to
# write to / modify the file.
if not isinstance(de, DataFileElement):
temp_dir = self.temp_dir
return de.write_temp(temp_dir)
# noinspection PyAbstractClass,PyPep8Naming
class ColorDescriptor_Image (ColorDescriptor_Base):
def valid_content_types(self):
"""
:return: A set valid MIME type content types that this descriptor can
handle.
:rtype: set[str]
"""
return {'image/bmp', 'image/jpeg', 'image/png', 'image/tiff'}
def _generate_descriptor_matrices(self, data_set, **kwargs):
"""
Generate info and descriptor matrices based on ingest type.
:param data_set: Iterable of data elements to generate combined info
and descriptor matrices for.
:type data_set: collections.Set[smqtk.representation.DataElement]
:param limit: Limit the number of descriptor entries to this amount.
:type limit: int
:return: Combined info and descriptor matrices for all base images
:rtype: (numpy.core.multiarray.ndarray, numpy.core.multiarray.ndarray)
"""
if not data_set:
raise ValueError("No data given to process.")
inf = float('inf')
descriptor_limit = kwargs.get('limit', inf)
per_item_limit = numpy.floor(float(descriptor_limit) / len(data_set))
if len(data_set) == 1:
# because an iterable doesn't necessarily have a next() method
di = next(iter(data_set))
# Check for checkpoint files
info_fp, desc_fp = \
self._get_standard_info_descriptors_filepath(di)
# Save out data bytes to temporary file
temp_img_filepath = self._get_data_temp_path(di)
try:
# Generate descriptors
utils.generate_descriptors(
self.EXE, temp_img_filepath,
self.descriptor_type(), info_fp, desc_fp, per_item_limit
)
finally:
# clean temp file
di.clean_temp()
return numpy.load(info_fp), numpy.load(desc_fp)
else:
# compute and V-stack matrices for all given images
pool = multiprocessing.Pool(processes=self.parallel)
# Mapping of UID to tuple containing:
# (info_fp, desc_fp, async processing result, tmp_clean_method)
r_map = {}
with SimpleTimer("Computing descriptors async...", self._log.debug):
for di in data_set:
# Creating temporary image file from data bytes
tmp_img_fp = self._get_data_temp_path(di)
info_fp, desc_fp = \
self._get_standard_info_descriptors_filepath(di)
args = (self.EXE, tmp_img_fp,
self.descriptor_type(), info_fp, desc_fp)
r = pool.apply_async(utils.generate_descriptors, args)
r_map[di.uuid()] = (info_fp, desc_fp, r, di.clean_temp)
pool.close()
# Pass through results from descriptor generation, aggregating
# matrix shapes.
# - Transforms r_map into:
# UID -> (info_fp, desc_fp, starting_row, SubSampleIndices)
self._log.debug("Constructing information for super matrices...")
s_keys = sorted(r_map.keys())
running_height = 0 # info and desc heights congruent
i_width = None
d_width = None
for uid in s_keys:
ifp, dfp, r, tmp_clean_method = r_map[uid]
# descriptor generation may have failed for this ingest UID
try:
i_shape, d_shape = r.get()
except RuntimeError as ex:
self._log.warning("Descriptor generation failed for "
"UID[%s], skipping its inclusion in "
"model: %s", uid, str(ex))
r_map[uid] = None
continue
finally:
# Done with image file, so remove from filesystem
tmp_clean_method()
if d_width is None and d_shape[0] != 0:
i_width = i_shape[1]
d_width = d_shape[1]
# skip this result if it generated no descriptors
if d_shape[1] == 0:
continue
ssi = None
if i_shape[0] > per_item_limit:
# pick random indices to subsample down to size limit
ssi = sorted(
numpy.random.permutation(i_shape[0])[:per_item_limit]
)
# Only keep this if any descriptors were generated
r_map[uid] = (ifp, dfp, running_height, ssi)
running_height += min(i_shape[0], per_item_limit)
pool.join()
# Asynchronously load files, inserting data into master matrices
self._log.debug("Building super matrices...")
master_info = numpy.zeros((running_height, i_width), dtype=float)
master_desc = numpy.zeros((running_height, d_width), dtype=float)
tp = multiprocessing.pool.ThreadPool(processes=self.parallel)
for uid in s_keys:
if r_map[uid]:
ifp, dfp, sR, ssi = r_map[uid]
tp.apply_async(ColorDescriptor_Image._thread_load_matrix,
args=(ifp, master_info, sR, ssi))
tp.apply_async(ColorDescriptor_Image._thread_load_matrix,
args=(dfp, master_desc, sR, ssi))
tp.close()
tp.join()
return master_info, master_desc
@staticmethod
def _thread_load_matrix(filepath, m, sR, subsample=None):
"""
load a numpy matrix from ``filepath``, inserting the loaded matrix into
``m`` starting at the row ``sR``.
If ``subsample`` has a value, it is expected to be a list of row
indices to select from the loaded matrix before insertion.
"""
n = numpy.load(filepath)
if subsample:
n = n[subsample, :]
m[sR:sR+n.shape[0], :n.shape[1]] = n
# noinspection PyAbstractClass,PyPep8Naming
class ColorDescriptor_Video (ColorDescriptor_Base):
# Custom higher codebook descriptor limit for video, since videos have multiple frames.
CODEBOOK_DESCRIPTOR_LIMIT = 1500000
FRAME_EXTRACTION_PARAMS = {
"second_offset": 0.0, # Start at beginning
"second_interval": 0.5, # Sample every 0.5 seconds
"max_duration": 1.0, # Cover full duration
"output_image_ext": 'png', # Output PNG files
"ffmpeg_exe": "ffmpeg",
}
def valid_content_types(self):
"""
:return: A set valid MIME type content types that this descriptor can
handle.
:rtype: set[str]
"""
# At the moment, assuming ffmpeg can decode all video types, which it
# probably cannot, but we'll filter this down when it becomes relevant.
# noinspection PyUnresolvedReferences
# TODO: GIF support?
return set([x for x in mimetypes.types_map.values()
if x.startswith('video')])
def _generate_descriptor_matrices(self, data_set, **kwargs):
"""
Generate info and descriptor matrices based on ingest type.
:param data_set: Iterable of data elements to generate combined info
and descriptor matrices for.
:type data_set: collections.Set[smqtk.representation.DataElement]
:param limit: Limit the number of descriptor entries to this amount.
:type limit: int
:return: Combined info and descriptor matrices for all base images
:rtype: (numpy.core.multiarray.ndarray, numpy.core.multiarray.ndarray)
"""
descriptor_limit = kwargs.get('limit', float('inf'))
# With videos, an "item" is one video, so collect over the whole video
# as normal, then subsample from the full video collection.
per_item_limit = numpy.floor(float(descriptor_limit) / len(data_set))
# If an odd number of jobs, favor descriptor extraction
if self.parallel:
descr_parallel = int(max(1.0, math.ceil(self.parallel/2.0)))
extract_parallel = int(max(1.0, math.floor(self.parallel/2.0)))
else:
cpuc = multiprocessing.cpu_count()
descr_parallel = int(max(1.0, math.ceil(cpuc/2.0)))
extract_parallel = int(max(1.0, math.floor(cpuc/2.0)))
# For each video, extract frames and submit colorDescriptor processing
# jobs for each frame, combining all results into a single matrix for
# return.
pool = multiprocessing.Pool(processes=descr_parallel)
# Mapping of [UID] to [frame] to tuple containing:
# (info_fp, desc_fp, async processing result)
r_map = {}
with SimpleTimer("Extracting frames and submitting descriptor jobs...",
self._log.debug):
for di in data_set:
r_map[di.uuid()] = {}
tmp_vid_fp = self._get_data_temp_path(di)
p = dict(self.FRAME_EXTRACTION_PARAMS)
vmd = get_metadata_info(tmp_vid_fp)
p['second_offset'] = vmd.duration * p['second_offset']
p['max_duration'] = vmd.duration * p['max_duration']
fm = video_utils.ffmpeg_extract_frame_map(
self._work_dir,
tmp_vid_fp,
parallel=extract_parallel,
**p
)
# Compute descriptors for extracted frames.
for frame, imgPath in six.iteritems(fm):
info_fp, desc_fp = \
self._get_standard_info_descriptors_filepath(di, frame)
r = pool.apply_async(
utils.generate_descriptors,
args=(self.EXE, imgPath,
self.descriptor_type(), info_fp, desc_fp)
)
r_map[di.uuid()][frame] = (info_fp, desc_fp, r)
# Clean the temporary video file while computing descriptors.
# This does not remove the extracted frames that the underlying
# detector/descriptor is working on.
di.clean_temp()
pool.close()
# Each result is a tuple of two ndarrays: info and descriptor matrices
with SimpleTimer("Collecting shape information for super matrices...",
self._log.debug):
running_height = 0
i_width = None
d_width = None
# Transform r_map[uid] into:
# (info_mat_files, desc_mat_files, sR, ssi_list)
# -> files in frame order
uids = sorted(r_map)
for uid in uids:
video_num_desc = 0
video_info_mat_fps = [] # ordered list of frame info mat files
video_desc_mat_fps = [] # ordered list of frame desc mat files
for frame in sorted(r_map[uid]):
ifp, dfp, r = r_map[uid][frame]
# Descriptor generation may have failed for this UID
try:
i_shape, d_shape = r.get()
except RuntimeError as ex:
self._log.warning('Descriptor generation failed for '
'frame %d in video UID[%s]: %s',
frame, uid, str(ex))
r_map[uid] = None
continue
if d_width is None and d_shape[0] != 0:
i_width = i_shape[1]
d_width = d_shape[1]
# Skip if there were no descriptors generated for this
# frame
if d_shape[1] == 0:
continue
video_info_mat_fps.append(ifp)
video_desc_mat_fps.append(dfp)
video_num_desc += d_shape[0]
# If combined descriptor height exceeds the per-item limit,
# generate a random subsample index list
ssi = None
if video_num_desc > per_item_limit:
ssi = sorted(
numpy.random
.permutation(video_num_desc)[:per_item_limit]
)
video_num_desc = len(ssi)
r_map[uid] = (video_info_mat_fps, video_desc_mat_fps,
running_height, ssi)
running_height += video_num_desc
pool.join()
del pool
with SimpleTimer("Building master descriptor matrices...",
self._log.debug):
master_info = numpy.zeros((running_height, i_width), dtype=float)
master_desc = numpy.zeros((running_height, d_width), dtype=float)
tp = multiprocessing.pool.ThreadPool(processes=self.parallel)
for uid in uids:
info_fp_list, desc_fp_list, sR, ssi = r_map[uid]
tp.apply_async(ColorDescriptor_Video._thread_load_matrices,
args=(master_info, info_fp_list, sR, ssi))
tp.apply_async(ColorDescriptor_Video._thread_load_matrices,
args=(master_desc, desc_fp_list, sR, ssi))
tp.close()
tp.join()
return master_info, master_desc
@staticmethod
def _thread_load_matrices(m, file_list, sR, subsample=None):
"""
load numpy matrices from files in ``file_list``, concatenating them
vertically. If a list of row indices is provided in ``subsample`` we
subsample those rows out of the concatenated matrix. This matrix is then
inserted into ``m`` starting at row ``sR``.
"""
c = numpy.load(file_list[0])
for i in range(1, len(file_list)):
c = numpy.vstack((c, numpy.load(file_list[i])))
if subsample:
c = c[subsample, :]
m[sR:sR+c.shape[0], :c.shape[1]] = c
# Begin automatic class type creation
valid_descriptor_types = [
'rgbhistogram',
'opponenthistogram',
'huehistogram',
'nrghistogram',
'transformedcolorhistogram',
'colormoments',
'colormomentinvariants',
'sift',
'huesift',
'hsvsift',
'opponentsift',
'rgsift',
'csift',
'rgbsift',
]
def _create_image_descriptor_class(descriptor_type_str):
"""
Create and return a ColorDescriptor class that operates over Image files
using the given descriptor type.
"""
assert descriptor_type_str in valid_descriptor_types, \
"Given ColorDescriptor type was not valid! Given: %s. Expected one " \
"of: %s" % (descriptor_type_str, valid_descriptor_types)
# noinspection PyPep8Naming
class _cd_image_impl (ColorDescriptor_Image):
def descriptor_type(self):
"""
:rtype: str
"""
return descriptor_type_str
_cd_image_impl.__name__ = "ColorDescriptor_Image_%s" % descriptor_type_str
return _cd_image_impl
def _create_video_descriptor_class(descriptor_type_str):
"""
Create and return a ColorDescriptor class that operates over Video files
using the given descriptor type.
"""
assert descriptor_type_str in valid_descriptor_types, \
"Given ColorDescriptor type was not valid! Given: %s. Expected one " \
"of: %s" % (descriptor_type_str, valid_descriptor_types)
# noinspection PyPep8Naming
class _cd_video_impl (ColorDescriptor_Video):
def descriptor_type(self):
"""
:rtype: str
"""
return descriptor_type_str
_cd_video_impl.__name__ = "ColorDescriptor_Video_%s" % descriptor_type_str
return _cd_video_impl
# In order to allow multiprocessing, class types must be concretely assigned to
# variables in the module. Dynamic generation causes issues with pickling (the
# default data transmission protocol).
ColorDescriptor_Image_rgbhistogram = \
_create_image_descriptor_class('rgbhistogram')
ColorDescriptor_Image_opponenthistogram = \
_create_image_descriptor_class('opponenthistogram')
ColorDescriptor_Image_huehistogram = \
_create_image_descriptor_class('huehistogram')
ColorDescriptor_Image_nrghistogram = \
_create_image_descriptor_class('nrghistogram')
ColorDescriptor_Image_transformedcolorhistogram = \
_create_image_descriptor_class('transformedcolorhistogram')
ColorDescriptor_Image_colormoments = \
_create_image_descriptor_class('colormoments')
ColorDescriptor_Image_colormomentinvariants = \
_create_image_descriptor_class('colormomentinvariants')
ColorDescriptor_Image_sift = _create_image_descriptor_class('sift')
ColorDescriptor_Image_huesift = _create_image_descriptor_class('huesift')
ColorDescriptor_Image_hsvsift = _create_image_descriptor_class('hsvsift')
ColorDescriptor_Image_opponentsift = \
_create_image_descriptor_class('opponentsift')
ColorDescriptor_Image_rgsift = _create_image_descriptor_class('rgsift')
ColorDescriptor_Image_csift = _create_image_descriptor_class('csift')
ColorDescriptor_Image_rgbsift = _create_image_descriptor_class('rgbsift')
ColorDescriptor_Video_rgbhistogram = \
_create_video_descriptor_class('rgbhistogram')
ColorDescriptor_Video_opponenthistogram = \
_create_video_descriptor_class('opponenthistogram')
ColorDescriptor_Video_huehistogram = \
_create_video_descriptor_class('huehistogram')
ColorDescriptor_Video_nrghistogram = \
_create_video_descriptor_class('nrghistogram')
ColorDescriptor_Video_transformedcolorhistogram = \
_create_video_descriptor_class('transformedcolorhistogram')
ColorDescriptor_Video_colormoments = \
_create_video_descriptor_class('colormoments')
ColorDescriptor_Video_colormomentinvariants = \
_create_video_descriptor_class('colormomentinvariants')
ColorDescriptor_Video_sift = _create_video_descriptor_class('sift')
ColorDescriptor_Video_huesift = _create_video_descriptor_class('huesift')
ColorDescriptor_Video_hsvsift = _create_video_descriptor_class('hsvsift')
ColorDescriptor_Video_opponentsift = \
_create_video_descriptor_class('opponentsift')
ColorDescriptor_Video_rgsift = _create_video_descriptor_class('rgsift')
ColorDescriptor_Video_csift = _create_video_descriptor_class('csift')
ColorDescriptor_Video_rgbsift = _create_video_descriptor_class('rgbsift')
cd_type_list = [
ColorDescriptor_Image_rgbhistogram,
ColorDescriptor_Video_rgbhistogram,
ColorDescriptor_Image_opponenthistogram,
ColorDescriptor_Video_opponenthistogram,
ColorDescriptor_Image_huehistogram,
ColorDescriptor_Video_huehistogram,
ColorDescriptor_Image_nrghistogram,
ColorDescriptor_Video_nrghistogram,
ColorDescriptor_Image_transformedcolorhistogram,
ColorDescriptor_Video_transformedcolorhistogram,
ColorDescriptor_Image_colormoments,
ColorDescriptor_Video_colormoments,
ColorDescriptor_Image_colormomentinvariants,
ColorDescriptor_Video_colormomentinvariants,
ColorDescriptor_Image_sift,
ColorDescriptor_Video_sift,
ColorDescriptor_Image_huesift,
ColorDescriptor_Video_huesift,
ColorDescriptor_Image_hsvsift,
ColorDescriptor_Video_hsvsift,
ColorDescriptor_Image_opponentsift,
ColorDescriptor_Video_opponentsift,
ColorDescriptor_Image_rgsift,
ColorDescriptor_Video_rgsift,
ColorDescriptor_Image_csift,
ColorDescriptor_Video_csift,
ColorDescriptor_Image_rgbsift,
ColorDescriptor_Video_rgbsift,
]
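# A minimal usage sketch (illustrative only): generate a model from a couple
# of images and compute a single descriptor. The paths below are hypothetical;
# the colorDescriptor executable and pyflann must be available, and
# ``_compute_descriptor`` is called directly here purely for demonstration.
if __name__ == "__main__":
    _example_elems = {DataFileElement(p) for p in ("/tmp/img_a.png",
                                                   "/tmp/img_b.png")}
    _cd = ColorDescriptor_Image_csift("/tmp/cd_models", "/tmp/cd_work")
    if not _cd.has_model:
        _cd.generate_model(_example_elems)
    # Resulting vector is a histogram of relative codeword frequencies.
    _vec = _cd._compute_descriptor(next(iter(_example_elems)))
    print(_vec.shape, _vec.sum())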
|
from django.core.exceptions import ValidationError
from django.shortcuts import render, redirect
from django.http import HttpResponse
# from django.shortcuts
from lists.forms import ItemForm
from lists.models import Item, List
# Create your views here.
def home_page( request ):
return render(
request,
'home.html',
{'form': ItemForm() }
)
def view_list( request, list_id ):
itemlist = List.objects.get( id=list_id )
# form = ItemForm()
# basic method
# if request.method == 'POST':
# form = ItemForm( data=request.POST )
# if form.is_valid():
# # Item.objects.create( text=request.POST['text'], list=itemlist )
# form.save( for_list=itemlist )
# return redirect( itemlist )
# POST or none method
form = ItemForm( data=request.POST or None )
if form.is_valid() :
form.save( for_list=itemlist )
return redirect( itemlist )
return render( request, 'list.html',
{ 'list': itemlist, 'form': form }
)
def new_list( request ):
form = ItemForm( data=request.POST )
if form.is_valid():
itemlist = List.objects.create()
# Item.objects.create( text=request.POST['text'] , list=itemlist)
form.save( for_list=itemlist )
# even better with the model defining get_absolute_url
return redirect( itemlist )
else:
return render( request, 'home.html', {"form": form } )
|
from itertools import groupby
def double_check(s):
return any(sum(1 for _ in g) == 2 for _, g in groupby(s.lower()))
# from re import compile, search
#
# REGEX = compile(r'(.)\1')
#
#
# def double_check(s):
# return bool(search(REGEX, s.lower()))
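# Quick sanity checks for the groupby version above (illustrative values).
# Note: as written, it requires a run of *exactly* two identical adjacent
# characters, so a triple like "aaa" does not count, whereas the commented
# regex variant would also match longer runs.
if __name__ == "__main__":
    assert double_check("abCCdef")       # 'cc' after lowercasing
    assert not double_check("abcdef")    # no adjacent repeats
    assert not double_check("aaab")      # run of three, not exactly two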
|
# User
from Gamer.models import User
import json as simplejson
from django.views.generic import View
from django.http import JsonResponse
class Username(View):
# username
def post(self,request,*args,**kwargs):
# username = request.POST.get('username')
req = simplejson.loads(request.body.decode('utf-8'))
username = req['username']
complete = User.objects.filter(username=username)
if not complete:
User.objects.create(username=username)
a = {'success': True,"status":1}
return JsonResponse(a)
else:
msg = "Username already exists, please choose a different name for registration"
return JsonResponse(
{
"status":0,
"success":False,
"message":msg,
},
json_dumps_params={'ensure_ascii': False}
)
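# Illustrative request/response shapes for this endpoint (hypothetical values):
#   POST body: {"username": "alice"}
#   new user       -> {"success": true, "status": 1}
#   duplicate user -> {"status": 0, "success": false, "message": "..."}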
|