content stringlengths 5 1.05M |
|---|
# Copyright (c) Sebastian Scholz
# See LICENSE for details.
""" Classes for representing and dealing with oauth2 clients """
from abc import abstractmethod, ABCMeta
try:
from urlparse import urlparse
except ImportError:
# noinspection PyUnresolvedReferences
from urllib.parse import urlparse
from txoauth2.util import isAnyStr
from txoauth2.granttypes import GrantTypes
from txoauth2.errors import InvalidClientAuthenticationError, NoClientAuthenticationError
class ClientStorage(object):
    """
    Manages the clients known to the authorization server and provides
    access to them via their clientId.
    """
    __metaclass__ = ABCMeta

    # pylint: disable=no-self-use, unused-argument
    def authenticateClient(self, client, request, secret=None):
        """
        Authenticate a given client.
        :raises OAuth2Error: If the client could not be authenticated.
        :param client: The client that should get authenticated.
        :param request: The request that may contain the credentials for a client.
        :param secret: The client secret, if it could get extracted from the request.
        :return: The client that was authenticated by the request.
        """
        # Guard clause: without a secret there is nothing to authenticate with.
        if secret is None:
            raise NoClientAuthenticationError()
        # Only confidential clients carry a secret to compare against.
        if isinstance(client, PasswordClient) and client.secret == secret:
            return client
        raise InvalidClientAuthenticationError()

    @abstractmethod
    def getClient(self, clientId):
        """
        Return a Client object representing the client with the given clientId.
        :raises KeyError: If no client with the given clientId is found.
        :param clientId: The client id of the client.
        :return: The Client object.
        """
        raise NotImplementedError()
class Client(object):
    """
    This class represents a client.
    A client is an entity, which is given access to a scope by the user.
    The client can use a grant type it is authorized to use to request an access token
    with which it can access resources on behalf of the user.
    """
    def __init__(self, clientId, redirectUris, authorizedGrantTypes):
        """
        :raises ValueError: If one of the arguments is not of the expected type
                            or one of the redirect uris has a fragment or is relative.
        :param clientId: The id of this client.
        :param redirectUris: A list of urls, which we can redirect to after authorization.
        :param authorizedGrantTypes: A list of grant types that this client is authorized
                                     to use to get an access token.
        """
        super(Client, self).__init__()
        if not isAnyStr(clientId):
            raise ValueError('Expected clientId to be a string, got ' + str(type(clientId)))
        if not isinstance(redirectUris, list):
            raise ValueError('Expected redirectUris to be of type list, got '
                             + str(type(redirectUris)))
        for uri in redirectUris:
            # Use isAnyStr, consistent with the clientId check above: the file
            # supports Python 2 (see the urlparse import fallback), where a
            # plain isinstance(uri, str) would reject unicode uris.
            if not isAnyStr(uri):
                raise ValueError('Expected the redirectUris to be of type str, got '
                                 + str(type(uri)))
            parsedUri = urlparse(uri)
            # RFC 6749 requires the redirection endpoint to be an absolute URI
            # without a fragment component.
            if parsedUri.fragment != '':
                raise ValueError('Got a redirect uri with a fragment: ' + uri)
            if parsedUri.netloc == '':
                raise ValueError('Got a redirect uri that is not absolute: ' + uri)
        if not isinstance(authorizedGrantTypes, list):
            raise ValueError('Expected authorizedGrantTypes to be of type list, got '
                             + str(type(authorizedGrantTypes)))
        # Normalize GrantTypes enum members to their plain string values.
        authorizedGrantTypes = [grantType.value if isinstance(grantType, GrantTypes) else grantType
                                for grantType in authorizedGrantTypes]
        for grantType in authorizedGrantTypes:
            # Same Python 2/3 string check as above for consistency.
            if not isAnyStr(grantType):
                raise ValueError('Expected the grant types to be of type str, got '
                                 + str(type(grantType)))
        self.id = clientId  # pylint: disable=invalid-name
        self.redirectUris = redirectUris
        self.authorizedGrantTypes = authorizedGrantTypes
class PublicClient(Client):
    """
    A public client: one that is incapable of keeping its credentials
    confidential and is therefore never required to authenticate itself.
    See: https://tools.ietf.org/html/rfc6749#section-2.1
    """
    def __init__(self, *args, **kwargs):
        """Forward all arguments unchanged to :class:`Client`."""
        super(PublicClient, self).__init__(*args, **kwargs)
class PasswordClient(Client):
    """
    A confidential client that authenticates itself with a password/secret.
    See: https://tools.ietf.org/html/rfc6749#section-2.3.1
    """
    def __init__(self, clientId, redirectUris, authorizedGrantTypes, secret):
        """
        :param clientId: The id of this client.
        :param redirectUris: A list of urls, which we can redirect to after authorization.
        :param authorizedGrantTypes: A list of grant types this client may use.
        :param secret: The password this client authenticates itself with.
        """
        super(PasswordClient, self).__init__(clientId, redirectUris, authorizedGrantTypes)
        # Stored in clear; compared against the request secret during
        # authentication by ClientStorage.authenticateClient.
        self.secret = secret
|
from flair.data import Corpus
from flair.data import Sentence
from flair.embeddings import TokenEmbeddings, WordEmbeddings, \
StackedEmbeddings, CharacterEmbeddings, FlairEmbeddings, \
PooledFlairEmbeddings, ELMoEmbeddings, BertEmbeddings , RoBERTaEmbeddings
from typing import List
from create_flair_corpus import read_in_AMT, read_in_CADEC, read_in_TwitterADR, read_in_Micromed
# 6. initialize trainer
from flair.trainers import ModelTrainer
# 5. initialize sequence tagger
from flair.models import SequenceTagger
# 6. initialize trainer
from flair.training_utils import EvaluationMetric
# 9. continue trainer at later point
from pathlib import Path
def train(model, selected_embeddings):
    """Train an NER SequenceTagger on the named corpus.

    :param model: corpus name; one of 'AMT', 'CADEC', 'TwitterADR', 'Micromed'.
    :param selected_embeddings: mapping of embedding name -> truthy flag
        selecting which embeddings to stack.
    :raises ValueError: if *model* is not a known corpus name.
    """
    # 1. get the corpus
    if model == 'AMT':
        corpus = read_in_AMT()
    elif model == 'CADEC':
        corpus = read_in_CADEC()
    elif model == 'TwitterADR':
        corpus = read_in_TwitterADR()
    elif model == 'Micromed':
        corpus = read_in_Micromed()
    else:
        # Previously an unknown name fell through to a NameError on `corpus`;
        # fail with a clear message instead.
        raise ValueError('Unknown corpus/model name: ' + model)
    print(corpus)
    # 2. what tag do we want to predict?
    tag_type = 'ner'
    # 3. make the tag dictionary from the corpus
    tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
    print(tag_dictionary.idx2item)
    # 4. assemble the embedding stack from the selected flags
    embedding_types: List[TokenEmbeddings] = []
    if selected_embeddings['glove']:
        embedding_types.append(WordEmbeddings('glove'))
    if selected_embeddings['twitter']:
        embedding_types.append(WordEmbeddings('twitter'))
    if selected_embeddings['char']:
        embedding_types.append(CharacterEmbeddings())
    if selected_embeddings['flair']:
        # Forward and backward contextual string embeddings belong together
        # (the original tested the same flag twice).
        embedding_types.append(FlairEmbeddings('news-forward'))
        embedding_types.append(FlairEmbeddings('news-backward'))
    if selected_embeddings['pooled-flair']:
        embedding_types.append(PooledFlairEmbeddings('news-forward', pooling='mean'))
        embedding_types.append(PooledFlairEmbeddings('news-backward', pooling='mean'))
    if selected_embeddings['bert']:
        embedding_types.append(BertEmbeddings())
    if selected_embeddings['roberta']:
        embedding_types.append(RoBERTaEmbeddings())
    if selected_embeddings['biobert']:
        embedding_types.append(BertEmbeddings("data/embeddings/biobert-pubmed-pmc-cased"))
    if selected_embeddings['clinicalbiobert']:
        embedding_types.append(BertEmbeddings("data/embeddings/pretrained_bert_tf/biobert-base-clinical-cased"))
    if selected_embeddings['elmo']:
        embedding_types.append(ELMoEmbeddings())
    embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)
    # 5. initialize sequence tagger
    tagger: SequenceTagger = SequenceTagger(hidden_size=256,
                                            embeddings=embeddings,
                                            tag_dictionary=tag_dictionary,
                                            tag_type=tag_type,
                                            use_crf=True)
    # 6. initialize trainer
    trainer: ModelTrainer = ModelTrainer(tagger, corpus)
    # Output directory encodes the corpus and the active embeddings.
    selected_embeddings_text = '_'.join(
        key for key in selected_embeddings if selected_embeddings[key])
    model_dir = 'resources/taggers/FA_' + model + selected_embeddings_text
    # 7. start training
    trainer.train(model_dir,
                  train_with_dev=True,
                  learning_rate=0.1,
                  mini_batch_size=4,
                  max_epochs=200,
                  checkpoint=True)
    # 8. plot training curves (optional)
    from flair.visual.training_curves import Plotter
    plotter = Plotter()
    plotter.plot_training_curves(model_dir + '/loss.tsv')
    plotter.plot_weights(model_dir + '/weights.txt')
def test(model, selected_embeddings):
    """Load a trained tagger and run it on one sample sentence.

    :param model: corpus name the tagger was trained on (part of its path).
    :param selected_embeddings: mapping of embedding name -> truthy flag; only
        used to reconstruct the model directory name.
    """
    selected_embeddings_text = '_'.join(
        key for key in selected_embeddings if selected_embeddings[key])
    print(selected_embeddings_text)
    model_dir = 'resources/taggers/' + model + selected_embeddings_text + '_fine-tuned7s'
    # Load the trained tagger into its own name; the original rebound the
    # `model` parameter, shadowing the corpus name argument.
    tagger = SequenceTagger.load(model_dir + '/best-model.pt')
    sentence = Sentence("If you've been on a low calorie diet + exercise for a long time, probably you have low free T3 blood levels causing your hypo symptoms. You should ask specifically for freeT3 and freeT4 to be tested. The low conversion of T4 to T3 is your bodies way of ""protecting itself"" from any further calorie deficiet. The rest of this only matters if you do get low T3 confirmed: it is important you do not go on a T4 monotherapy, it would very likely make your situation worse because it's tricking your brain into thinking you have more then enough thyroid hormones, while your T3 deficit worsens. Either get T3 and T4 combination or no medication. Instead make sure you have enough Iodine, Selenium and Zinc in your diet and consider significantly increasing your calorie intake! It seems paradoxical but because this will eventually increase you T3 levels and basal metabolic rate it will not necessarily make you gain weight in the long term. Also dizzy spells could be low blood sugar (even if you don't who the classical symptoms of shaking/sweating.) If it is low blood sugar you need to be careful with that and make sure to get some glucose quick (both for preventing your dizzines causing accidents and also because every hypoglycemic state will stress out your metabolic system, autoamplifying the low T3)")
    # predict tags and print
    tagger.predict(sentence)
    print(sentence.to_dict(tag_type='ner'))
    # (removed a dead `for x in range(1, 10): pass` loop that did nothing)
def resume(model1, selected_embeddings, model2):
    """Continue training a tagger trained on *model1* using the *model2* corpus.

    :param model1: name of the corpus the saved model was trained on.
    :param selected_embeddings: mapping of embedding name -> truthy flag; must
        match the embeddings the saved model was trained with (used only to
        reconstruct directory names here).
    :param model2: corpus to continue training on.
    :raises ValueError: if *model2* is not a known corpus name.
    """
    # 1. get the corpus
    if model2 == 'AMT':
        corpus = read_in_AMT()
    elif model2 == 'CADEC':
        corpus = read_in_CADEC()
    elif model2 == 'TwitterADR':
        corpus = read_in_TwitterADR()
    elif model2 == 'Micromed':
        corpus = read_in_Micromed()
    else:
        # Previously an unknown name fell through to a NameError on `corpus`.
        raise ValueError('Unknown corpus/model name: ' + model2)
    print(corpus)
    # 2. what tag do we want to predict?
    tag_type = 'ner'
    # 3. make the tag dictionary from the corpus
    tag_dictionary = corpus.make_tag_dictionary(tag_type=tag_type)
    print(tag_dictionary.idx2item)
    # 4. assemble the embedding stack (kept for parity with train(); the
    #    loaded model below carries its own embeddings)
    embedding_types: List[TokenEmbeddings] = []
    if selected_embeddings['glove']:
        embedding_types.append(WordEmbeddings('glove'))
    if selected_embeddings['twitter']:
        embedding_types.append(WordEmbeddings('twitter'))
    if selected_embeddings['char']:
        embedding_types.append(CharacterEmbeddings())
    if selected_embeddings['flair']:
        embedding_types.append(FlairEmbeddings('news-forward'))
        embedding_types.append(FlairEmbeddings('news-backward'))
    if selected_embeddings['pooled-flair']:
        embedding_types.append(PooledFlairEmbeddings('news-forward', pooling='mean'))
        embedding_types.append(PooledFlairEmbeddings('news-backward', pooling='mean'))
    if selected_embeddings['bert']:
        embedding_types.append(BertEmbeddings())
    if selected_embeddings['elmo']:
        embedding_types.append(ELMoEmbeddings())
    embeddings: StackedEmbeddings = StackedEmbeddings(embeddings=embedding_types)
    selected_embeddings_text = '_'.join(
        key for key in selected_embeddings if selected_embeddings[key])
    model_dir1 = 'resources/taggers/to_resume_CoNLL-03_' + model1 + selected_embeddings_text
    # Resume from the best model rather than the raw checkpoint; using
    # load_checkpoint/ModelTrainer.load_from_checkpoint would also restore
    # optimizer state, if that is ever desired.
    best_model = SequenceTagger.load(Path(model_dir1 + '/best-model.pt'))
    trainer: ModelTrainer = ModelTrainer(best_model, corpus)
    model_dir2 = ('resources/taggers/train_with_dev_from_' + model1 + '_to_'
                  + model2 + selected_embeddings_text + '_fine-tuned7s')
    trainer.train(model_dir2,
                  EvaluationMetric.MICRO_F1_SCORE,
                  train_with_dev=True,
                  learning_rate=0.1,
                  mini_batch_size=8,
                  max_epochs=150,
                  checkpoint=True)
# params: which corpus to train on and which embeddings to stack.
# Flags are truthy ints (1 = use this embedding, 0 = skip it).
model = 'Micromed'
selected_embeddings = {'glove':1, 'char':0, 'flair':0, 'pooled-flair':0, \
                       'bert':1, 'twitter':0, 'elmo':0, 'roberta':0, \
                       'biobert':0, 'clinicalbiobert':0}
train(model, selected_embeddings)
# to train different models and parameteres, you can uncomment and/or change the code below
# # params
# model = 'CADEC'
# selected_embeddings = {'glove':0, 'char':0, 'flair':0, 'pooled-flair':1, \
#                        'bert':0, 'twitter':0, 'elmo':1, 'roberta':1}
# train(model, selected_embeddings)
# # params
# model = 'Micromed'
# selected_embeddings = {'glove':0, 'twitter':1, 'char':0, 'flair':0, 'pooled-flair':1}
# train(model, selected_embeddings)
# selected_embeddings = {'glove':1, 'char':1, 'flair':1}
# model1 = 'CADEC'
# model2 = 'AMT'
# resume(model1, selected_embeddings, model2)
# model = 'AMT'
# selected_embeddings = {'glove':1, 'char':0, 'flair':0, 'pooled-flair':1}
# train(model, selected_embeddings)
# selected_embeddings = {'glove':1, 'char':0, 'flair':0, 'pooled-flair':1}
# model1 = 'CADEC'
# model2 = 'AMT'
# resume(model1, selected_embeddings, model2)
# selected_embeddings = {'glove':1, 'char':0, 'flair':0, 'pooled-flair':1}
# model1 = 'CADEC'
# model2 = 'Micromed'
# resume(model1, selected_embeddings, model2)
|
from .query import SortFilterQuery
|
# -*- coding: utf-8 -*-
'''
Created on Dec 21, 2013
@author: Chris
'''
from gooey import Gooey
from gooey import GooeyParser
import message
welcome_message = \
r'''
__ __ _
\ \ / / | |
\ \ /\ / /__| | ___ ___ _ __ ___ ___
\ \/ \/ / _ \ |/ __/ _ \| '_ ` _ \ / _ \
\ /\ / __/ | (_| (_) | | | | | | __/
___\/__\/ \___|_|\___\___/|_| |_| |_|\___|
|__ __|
| | ___
| |/ _ \
| | (_) |
_|_|\___/ _ _
/ ____| | | |
| | __ ___ ___ ___ _ _| | |
| | |_ |/ _ \ / _ \ / _ \ | | | | |
| |__| | (_) | (_) | __/ |_| |_|_|
\_____|\___/ \___/ \___|\__, (_|_)
__/ |
|___/
'''
@Gooey(language='japanese', program_name=u'プログラム例')
def arbitrary_function():
    """Build the Japanese-language demo parser and hand the parsed args to main()."""
    parser = GooeyParser(description=u"コマンドライン引数を入力してください")
    # positional: file to process, picked via a file-chooser widget
    parser.add_argument('foo', metavar=u"ファイルブラウザ",
                        help=u"処理したいファイルの名前", widget="FileChooser")
    parser.add_argument('-d',
                        metavar=u'--デュレーション',
                        default=2,
                        type=int,
                        help=u'プログラム出力の期間(秒)')
    parser.add_argument('-s',
                        metavar=u'--スケジュール',
                        help=u'日時プログラムを開始すべき',
                        widget='DateChooser')
    parser.add_argument("-c",
                        metavar=u"--ショータイム",
                        action="store_true",
                        help=u"カウントダウンタイマーを表示します")
    parser.add_argument("-p",
                        metavar=u"--ポーズ",
                        action="store_true",
                        help=u"一時停止の実行")
    main(parser.parse_args())
def main(args):
    # NOTE(review): `message` is the module imported at the top of this file;
    # calling a module object raises TypeError. Presumably a function inside
    # that module was intended — confirm against the `message` module's API.
    message()
def here_is_more():
    """Placeholder with no behaviour; kept so the demo has an extra symbol."""
    return None
if __name__ == '__main__':
    # Launch the Gooey-wrapped argument parser when run as a script.
    arbitrary_function()
|
#!/usr/bin/env python3
"""Modularise a JSON schema
Modularise a JSON schema and allows it to accept a data structure that can be
composed of include files.
Two positional arguments are expected:
1. The file name of the JSON schema file.
2. The file name of a configuration file in JSON format.
The configuration file expects a mapping, where the keys are the file names
(relative paths to current working directory) of the output sub-schema files,
and the values are sub-schema break point locations (expressed as
`JMESPath <https://jmespath.org/>`_ format) in the input JSON schema document.
"""
from argparse import ArgumentParser
import json
import os
import sys
import jmespath
from .dataprocess import DataProcessor
# Keyword arguments applied to every json.dump call in this module.
JSON_DUMP_CONFIG = {'indent': 2}

# Sub-schema accepted at each break point: an object naming an include file
# (required) and optionally a query, using the keys DataProcessor understands.
INCLUDE_SCHEMA = {
    'properties': {
        DataProcessor.INCLUDE_KEY: {
            'type': 'string',
        },
        DataProcessor.QUERY_KEY: {
            'type': 'string',
        },
    },
    'required': [DataProcessor.INCLUDE_KEY],
    'type': 'object',
}
def schema_process(schema_filename: str, config_filename: str) -> None:
    """Process schema to handle includes according to configuration.

    :param schema_filename: schema file name.
    :param config_filename: configuration file name.
    :raises ValueError: if two configuration paths point to the same
        sub-schema location.
    """
    # Load inputs with `with` so the files are closed promptly; the original
    # json.load(open(...)) relied on garbage collection to close them.
    with open(schema_filename) as schema_fp:
        schema = json.load(schema_fp)
    with open(config_filename) as config_fp:
        config = json.load(config_fp)
    # Get subschemas, detect any duplicates
    subschemas = {}  # {filerelname: subschema, ...}
    schema_filebasename = '0-{}'.format(os.path.basename(schema_filename))
    for filerelname, pathstr in config.items():
        pathstr = pathstr.strip()
        if not pathstr:
            # An empty path designates the output name for the root schema.
            schema_filebasename = filerelname
            continue
        subschema = jmespath.search(pathstr, schema)
        for o_filebasename, o_subschema in subschemas.items():
            # Identity check: two config entries must not alias one location.
            if subschema is o_subschema:
                raise ValueError(
                    '{}: {} and {}: {} point to the same location.'.format(
                        filerelname, pathstr, o_filebasename, o_subschema,
                    )
                )
        subschemas[filerelname] = subschema
    # Take a shallow copy of the subschemas before modifying.
    # Dump later in case there are sub-subschemas.
    subschema_copies = (
        {f: subschema.copy() for f, subschema in subschemas.items()}
    )
    # Replace each break point in the root schema with a $ref/include choice.
    for filerelname, subschema in subschemas.items():
        subschema.clear()
        subschema.update({
            'oneOf': [{'$ref': filerelname}, INCLUDE_SCHEMA],
        })
    # Dump subschemas from copies, because original has been modified in place.
    for filerelname, subschema in subschema_copies.items():
        with open(filerelname, 'w') as subschema_file:
            json.dump(subschema, subschema_file, **JSON_DUMP_CONFIG)
    with open(schema_filebasename, 'w') as schema_file:
        json.dump(schema, schema_file, **JSON_DUMP_CONFIG)
def main(argv=None):
    """Command-line entry point.

    :param argv: argument list *without* the program name; ``None`` lets
        argparse default to ``sys.argv[1:]``.
    """
    parser = ArgumentParser(description=__doc__)
    parser.add_argument(
        'schema_filename',
        metavar='SCHEMA-FILE',
        help='Name of the JSON schema file to modularise')
    parser.add_argument(
        'config_filename',
        metavar='CONFIG-FILE',
        help='Name of the configuration file')
    args = parser.parse_args(argv)
    schema_process(args.schema_filename, args.config_filename)


if __name__ == '__main__':
    # Pass no argument list so argparse uses sys.argv[1:]; the previous
    # main(sys.argv) fed the program name in as SCHEMA-FILE.
    main()
|
from drone.config.settings import logger
def write_log(msg=None):
    """Decorator factory that logs calls to the wrapped function.

    Logs the optional *msg*, the function with its arguments, and the result
    at DEBUG level, then returns the result unchanged.

    :param msg: optional extra message logged before each call.
    :return: a decorator.
    """
    from functools import wraps

    def wrap(f):
        @wraps(f)  # preserve the wrapped function's name/doc/metadata
        def wrapped_f(*args, **kwargs):
            if msg:
                logger.debug(msg)
            # Lazy %-style args: formatting only happens if DEBUG is enabled.
            logger.debug('%s %s %s', f, args, kwargs)
            result = f(*args, **kwargs)
            logger.debug(result)
            return result
        return wrapped_f
    return wrap
|
# uncompyle6 version 2.13.2
# Python bytecode 3.5 (3351)
# Decompiled from: Python 3.5.3 (default, Jan 19 2017, 14:11:04)
# [GCC 6.3.0 20170118]
# Embedded file name: db\tables\profiles.py
__author__ = 'sanyi'
from sqlalchemy import *
from sqlalchemy.orm import mapper
from sqlalchemy.dialects.postgresql import UUID as C_UUID
from db.tables import metadata
from sqlalchemy.dialects.postgresql import JSONB as TYPE_JSONB
# Table definition for scanning profiles. The decompiled source had
# `index=true` / `DateTime(true)`: via the star import, `true` is SQLAlchemy's
# true() construct, which only worked because it is truthy — the Python
# booleans were clearly intended.
ProfilesTable = Table('scanning_profiles', metadata,
                      Column('profile_id', C_UUID, primary_key=True),
                      Column('owner_id', C_UUID, ForeignKey('users.user_id', ondelete='CASCADE'), index=True),
                      Column('creator_id', C_UUID, ForeignKey('users.user_id', ondelete='CASCADE'), index=True),
                      Column('name', TEXT, nullable=False),
                      Column('jobs', TYPE_JSONB),
                      Column('sort_order', INTEGER),
                      Column('deleted_at', DateTime(True)))  # timezone-aware
class ProfileRow(object):
    """One row of the ``scanning_profiles`` table (classically mapped)."""

    # Column attributes; the SQLAlchemy mapper fills these on load.
    profile_id = None
    owner_id = None
    creator_id = None
    name = None
    sort_order = None
    jobs = None
    deleted_at = None

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        return 'R_profile[{}]={}'.format(self.profile_id, self.name)
mapper(ProfileRow, ProfilesTable) |
from healthvaultlib.itemtypes.heartrate import HeartRate
from healthvaultlib.itemtypes.allergy import Allergy
from healthvaultlib.itemtypes.questionanswer import QuestionAnswer
from healthvaultlib.itemtypes.bloodpressure import BloodPressure
from healthvaultlib.itemtypes.comment import Comment
from healthvaultlib.itemtypes.geneticsnpresult import GeneticSnpResult
from healthvaultlib.itemtypes.educationmydatafile import EducationMydataFile
from healthvaultlib.itemtypes.familyhistorycondition import FamilyHistoryCondition
from healthvaultlib.itemtypes.bodydimension import BodyDimension
from healthvaultlib.itemtypes.cardiacprofile import CardiacProfile
from healthvaultlib.itemtypes.condition import Condition
from healthvaultlib.itemtypes.dailydietaryintake import DailyDietaryIntake
from healthvaultlib.itemtypes.weblink import WebLink
from healthvaultlib.itemtypes.papsession import PapSession
from healthvaultlib.itemtypes.peakflow import PeakFlow
from healthvaultlib.itemtypes.contact import Contact
from healthvaultlib.itemtypes.healthjournalentry import HealthJournalEntry
from healthvaultlib.itemtypes.cholesterol import Cholesterol
from healthvaultlib.itemtypes.procedure import Procedure
from healthvaultlib.itemtypes.continuityofcarerecord import ContinuityOfCareRecord
from healthvaultlib.itemtypes.labresults import LabResults
from healthvaultlib.itemtypes.applicationdatareference import ApplicationDataReference
from healthvaultlib.itemtypes.personalcontactinformation import PersonalContactInformation
from healthvaultlib.itemtypes.healthassessment import HealthAssessment
from healthvaultlib.itemtypes.clinicaldocumentarchitecture import ClinicalDocumentArchitecture
from healthvaultlib.itemtypes.appspecificinformation import AppspecificInformation
from healthvaultlib.itemtypes.hba1c import Hba1c
from healthvaultlib.itemtypes.sleepjournalentry import SleepJournalEntry
from healthvaultlib.itemtypes.microbiologylabtestresult import MicrobiologyLabTestResult
from healthvaultlib.itemtypes.familyhistoryperson import FamilyHistoryPerson
from healthvaultlib.itemtypes.menstruation import Menstruation
from healthvaultlib.itemtypes.vitalsigns import VitalSigns
from healthvaultlib.itemtypes.healthgoal import HealthGoal
from healthvaultlib.itemtypes.dischargesummary import DischargeSummary
from healthvaultlib.itemtypes.medicalimagestudy import MedicalImageStudy
from healthvaultlib.itemtypes.bloodglucose import BloodGlucose
from healthvaultlib.itemtypes.careplan import CarePlan
from healthvaultlib.itemtypes.insulininjectionusage import InsulinInjectionUsage
from healthvaultlib.itemtypes.mealdefinition import MealDefinition
from healthvaultlib.itemtypes.file import File
from healthvaultlib.itemtypes.exercisesamples import ExerciseSamples
from healthvaultlib.itemtypes.continuityofcaredocument import ContinuityOfCareDocument
from healthvaultlib.itemtypes.sleepsession import SleepSession
from healthvaultlib.itemtypes.asthmainhaler import AsthmaInhaler
from healthvaultlib.itemtypes.medicalannotation import MedicalAnnotation
from healthvaultlib.itemtypes.bodycomposition import BodyComposition
from healthvaultlib.itemtypes.weight import Weight
from healthvaultlib.itemtypes.status import Status
from healthvaultlib.itemtypes.diabeticprofile import DiabeticProfile
from healthvaultlib.itemtypes.respiratoryprofile import RespiratoryProfile
from healthvaultlib.itemtypes.fooddrink import FoodDrink
from healthvaultlib.itemtypes.aerobicprofile import AerobicProfile
from healthvaultlib.itemtypes.weightgoal import WeightGoal
from healthvaultlib.itemtypes.asthmainhalerusage import AsthmaInhalerUsage
from healthvaultlib.itemtypes.emotionalstate import EmotionalState
from healthvaultlib.itemtypes.familyhistory import FamilyHistory
from healthvaultlib.itemtypes.calorieguideline import CalorieGuideline
from healthvaultlib.itemtypes.weeklyaerobicexercisegoal import WeeklyAerobicExerciseGoal
from healthvaultlib.itemtypes.medication import Medication
from healthvaultlib.itemtypes.advancedirective import AdvanceDirective
from healthvaultlib.itemtypes.insulininjection import InsulinInjection
from healthvaultlib.itemtypes.allergicepisode import AllergicEpisode
from healthvaultlib.itemtypes.encounter import Encounter
from healthvaultlib.itemtypes.personalpicture import PersonalPicture
from healthvaultlib.itemtypes.exercise import Exercise
from healthvaultlib.itemtypes.healthcareproxy import HealthcareProxy
from healthvaultlib.itemtypes.bloodoxygensaturation import BloodOxygenSaturation
from healthvaultlib.itemtypes.immunization import Immunization
from healthvaultlib.itemtypes.message import Message
from healthvaultlib.itemtypes.personaldemographicinformation import PersonalDemographicInformation
from healthvaultlib.itemtypes.height import Height
from healthvaultlib.itemtypes.pregnancy import Pregnancy
from healthvaultlib.itemtypes.medicalproblem import MedicalProblem
from healthvaultlib.itemtypes.medicaldevice import MedicalDevice
from healthvaultlib.itemtypes.radiologyresult import RadiologyResult
from healthvaultlib.itemtypes.concern import Concern
from healthvaultlib.itemtypes.groupmembership import GroupMembership
from healthvaultlib.itemtypes.groupmembershipactivity import GroupMembershipActivity
from healthvaultlib.itemtypes.medicationfill import MedicationFill
from healthvaultlib.itemtypes.dailymedicationusage import DailyMedicationUsage
from healthvaultlib.itemtypes.insuranceplan import InsurancePlan
from healthvaultlib.itemtypes.educationsifstudentacademicrecord import EducationSifStudentAcademicRecord
from healthvaultlib.itemtypes.educationdocument import EducationDocument
from healthvaultlib.itemtypes.appointment import Appointment
from healthvaultlib.itemtypes.passwordprotectedpackage import PasswordprotectedPackage
from healthvaultlib.itemtypes.lifegoal import LifeGoal
from healthvaultlib.itemtypes.defibrillatorepisode import DefibrillatorEpisode
from healthvaultlib.itemtypes.explanationofbenefits import ExplanationOfBenefits
from healthvaultlib.itemtypes.healthevent import HealthEvent
from healthvaultlib.itemtypes.contraindication import Contraindication
from healthvaultlib.itemtypes.basicdemographicinformation import BasicDemographicInformation
class ItemTypeResolver():
itemtype_dict = {}
def __init__(self):
self.itemtype_dict['b81eb4a6-6eac-4292-ae93-3872d6870994'] = HeartRate
self.itemtype_dict['52bf9104-2c5e-4f1f-a66d-552ebcc53df7'] = Allergy
self.itemtype_dict['55d33791-58de-4cae-8c78-819e12ba5059'] = QuestionAnswer
self.itemtype_dict['ca3c57f4-f4c1-4e15-be67-0a3caf5414ed'] = BloodPressure
self.itemtype_dict['9f4e0fcd-10d7-416d-855a-90514ce2016b'] = Comment
self.itemtype_dict['9d006053-116c-43cc-9554-e0cda43558cb'] = GeneticSnpResult
self.itemtype_dict['0aa6a4c7-cef5-46ea-970e-206c8402dccb'] = EducationMydataFile
self.itemtype_dict['6705549b-0e3d-474e-bfa7-8197ddd6786a'] = FamilyHistoryCondition
self.itemtype_dict['dd710b31-2b6f-45bd-9552-253562b9a7c1'] = BodyDimension
self.itemtype_dict['adaf49ad-8e10-49f8-9783-174819e97051'] = CardiacProfile
self.itemtype_dict['7ea7a1f9-880b-4bd4-b593-f5660f20eda8'] = Condition
self.itemtype_dict['9c29c6b9-f40e-44ff-b24e-fba6f3074638'] = DailyDietaryIntake
self.itemtype_dict['d4b48e6b-50fa-4ba8-ac73-7d64a68dc328'] = WebLink
self.itemtype_dict['9085cad9-e866-4564-8a91-7ad8685d204d'] = PapSession
self.itemtype_dict['5d8419af-90f0-4875-a370-0f881c18f6b3'] = PeakFlow
self.itemtype_dict['25c94a9f-9d3d-4576-96dc-6791178a8143'] = Contact
self.itemtype_dict['21d75546-8717-4deb-8b17-a57f48917790'] = HealthJournalEntry
self.itemtype_dict['98f76958-e34f-459b-a760-83c1699add38'] = Cholesterol
self.itemtype_dict['df4db479-a1ba-42a2-8714-2b083b88150f'] = Procedure
self.itemtype_dict['1e1ccbfc-a55d-4d91-8940-fa2fbf73c195'] = ContinuityOfCareRecord
self.itemtype_dict['5800eab5-a8c2-482a-a4d6-f1db25ae08c3'] = LabResults
self.itemtype_dict['9ad2a94f-c6a4-4d78-8b50-75b65be0e250'] = ApplicationDataReference
self.itemtype_dict['162dd12d-9859-4a66-b75f-96760d67072b'] = PersonalContactInformation
self.itemtype_dict['58fd8ac4-6c47-41a3-94b2-478401f0e26c'] = HealthAssessment
self.itemtype_dict['1ed1cba6-9530-44a3-b7b5-e8219690ebcf'] = ClinicalDocumentArchitecture
self.itemtype_dict['a5033c9d-08cf-4204-9bd3-cb412ce39fc0'] = AppspecificInformation
self.itemtype_dict['62160199-b80f-4905-a55a-ac4ba825ceae'] = Hba1c
self.itemtype_dict['031f5706-7f1a-11db-ad56-7bd355d89593'] = SleepJournalEntry
self.itemtype_dict['b8fcb138-f8e6-436a-a15d-e3a2d6916094'] = MicrobiologyLabTestResult
self.itemtype_dict['cc23422c-4fba-4a23-b52a-c01d6cd53fdf'] = FamilyHistoryPerson
self.itemtype_dict['caff3ff3-812f-44b1-9c9f-c1af13167705'] = Menstruation
self.itemtype_dict['73822612-c15f-4b49-9e65-6af369e55c65'] = VitalSigns
self.itemtype_dict['dad8bb47-9ad0-4f09-a020-0ff051d1d0f7'] = HealthGoal
self.itemtype_dict['02ef57a2-a620-425a-8e92-a301542cca54'] = DischargeSummary
self.itemtype_dict['cdfc0a9b-6d3b-4d16-afa8-02b86d621a8d'] = MedicalImageStudy
self.itemtype_dict['879e7c04-4e8a-4707-9ad3-b054df467ce4'] = BloodGlucose
self.itemtype_dict['415c95e0-0533-4d9c-ac73-91dc5031186c'] = CarePlan
self.itemtype_dict['184166be-8adb-4d9c-8162-c403040e31ad'] = InsulinInjectionUsage
self.itemtype_dict['074e122a-335a-4a47-a63d-00a8f3e79e60'] = MealDefinition
self.itemtype_dict['bd0403c5-4ae2-4b0e-a8db-1888678e4528'] = File
self.itemtype_dict['e1f92d7f-9699-4483-8223-8442874ec6d9'] = ExerciseSamples
self.itemtype_dict['9c48a2b8-952c-4f5a-935d-f3292326bf54'] = ContinuityOfCareDocument
self.itemtype_dict['11c52484-7f1a-11db-aeac-87d355d89593'] = SleepSession
self.itemtype_dict['ff9ce191-2096-47d8-9300-5469a9883746'] = AsthmaInhaler
self.itemtype_dict['7ab3e662-cc5b-4be2-bf38-78f8aad5b161'] = MedicalAnnotation
self.itemtype_dict['18adc276-5144-4e7e-bf6c-e56d8250adf8'] = BodyComposition
self.itemtype_dict['3d34d87e-7fc1-4153-800f-f56592cb0d17'] = Weight
self.itemtype_dict['d33a32b2-00de-43b8-9f2a-c4c7e9f580ec'] = Status
self.itemtype_dict['80cf4080-ad3f-4bb5-a0b5-907c22f73017'] = DiabeticProfile
self.itemtype_dict['5fd15cb7-b717-4b1c-89e0-1dbcf7f815dd'] = RespiratoryProfile
self.itemtype_dict['089646a6-7e25-4495-ad15-3e28d4c1a71d'] = FoodDrink
self.itemtype_dict['7b2ea78c-4b78-4f75-a6a7-5396fe38b09a'] = AerobicProfile
self.itemtype_dict['b7925180-d69e-48fa-ae1d-cb3748ca170e'] = WeightGoal
self.itemtype_dict['03efe378-976a-42f8-ae1e-507c497a8c6d'] = AsthmaInhalerUsage
self.itemtype_dict['4b7971d6-e427-427d-bf2c-2fbcf76606b3'] = EmotionalState
self.itemtype_dict['4a04fcc8-19c1-4d59-a8c7-2031a03f21de'] = FamilyHistory
self.itemtype_dict['d3170d30-a41b-4bde-a116-87698c8a001a'] = CalorieGuideline
self.itemtype_dict['e4501363-fb95-4a11-bb60-da64e98048b5'] = WeeklyAerobicExerciseGoal
self.itemtype_dict['30cafccc-047d-4288-94ef-643571f7919d'] = Medication
self.itemtype_dict['822a5e5a-14f1-4d06-b92f-8f3f1b05218f'] = AdvanceDirective
self.itemtype_dict['3b3c053b-b1fe-4e11-9e22-d4b480de74e8'] = InsulinInjection
self.itemtype_dict['d65ad514-c492-4b59-bd05-f3f6cb43ceb3'] = AllergicEpisode
self.itemtype_dict['464083cc-13de-4f3e-a189-da8e47d5651b'] = Encounter
self.itemtype_dict['a5294488-f865-4ce3-92fa-187cd3b58930'] = PersonalPicture
self.itemtype_dict['85a21ddb-db20-4c65-8d30-33c899ccf612'] = Exercise
self.itemtype_dict['7ea47715-cba4-47f0-99d2-eb0a9fb4a85c'] = HealthcareProxy
self.itemtype_dict['3a54f95f-03d8-4f62-815f-f691fc94a500'] = BloodOxygenSaturation
self.itemtype_dict['cd3587b5-b6e1-4565-ab3b-1c3ad45eb04f'] = Immunization
self.itemtype_dict['72dc49e1-1486-4634-b651-ef560ed051e5'] = Message
self.itemtype_dict['92ba621e-66b3-4a01-bd73-74844aed4f5b'] = PersonalDemographicInformation
self.itemtype_dict['40750a6a-89b2-455c-bd8d-b420a4cb500b'] = Height
self.itemtype_dict['46d485cf-2b84-429d-9159-83152ba801f4'] = Pregnancy
self.itemtype_dict['5e2c027e-3417-4cfc-bd10-5a6f2e91ad23'] = MedicalProblem
self.itemtype_dict['ef9cf8d5-6c0b-4292-997f-4047240bc7be'] = MedicalDevice
self.itemtype_dict['e4911bd3-61bf-4e10-ae78-9c574b888b8f'] = RadiologyResult
self.itemtype_dict['aea2e8f2-11dd-4a7d-ab43-1d58764ebc19'] = Concern
self.itemtype_dict['66ac44c7-1d60-4e95-bb5b-d21490e91057'] = GroupMembership
self.itemtype_dict['e75fa095-31ed-4b30-b5f7-463963b5e734'] = GroupMembershipActivity
self.itemtype_dict['167ecf6b-bb54-43f9-a473-507b334907e0'] = MedicationFill
self.itemtype_dict['a9a76456-0357-493e-b840-598bbb9483fd'] = DailyMedicationUsage
self.itemtype_dict['9366440c-ec81-4b89-b231-308a4c4d70ed'] = InsurancePlan
self.itemtype_dict['c3353437-7a5e-46be-8e1a-f93dac872a68'] = EducationSifStudentAcademicRecord
self.itemtype_dict['9df1163d-eae1-405e-8a66-8aaf19bd5fc7'] = EducationDocument
self.itemtype_dict['4b18aeb6-5f01-444c-8c70-dbf13a2f510b'] = Appointment
self.itemtype_dict['c9287326-bb43-4194-858c-8b60768f000f'] = PasswordprotectedPackage
self.itemtype_dict['609319bf-35cc-40a4-b9d7-1b329679baaa'] = LifeGoal
self.itemtype_dict['a3d38add-b7b2-4ccd-856b-9b14bbc4e075'] = DefibrillatorEpisode
self.itemtype_dict['356fbba9-e0c9-4f4f-b0d9-4594f2490d2f'] = ExplanationOfBenefits
self.itemtype_dict['1572af76-1653-4c39-9683-9f9ca6584ba3'] = HealthEvent
self.itemtype_dict['046d0ad7-6d7f-4bfd-afd4-4192ca2e913d'] = Contraindication
self.itemtype_dict['3b3e6b16-eb69-483c-8d7e-dfe116ae6092'] = BasicDemographicInformation
def get_class(self, typeid):
    """Look up the item class registered for *typeid*.

    :param typeid: item-type id string (presumably a GUID — see the
        registrations above); raises KeyError if unregistered.
    """
    return self.itemtype_dict[typeid]
|
from .augmentations import get_augmentation
from .preprocesses import get_preprocess
|
import DistributedTreasureAI
from toontown.toonbase import ToontownGlobals
class DistributedSZTreasureAI(DistributedTreasureAI.DistributedTreasureAI):
    """Safe-zone treasure that heals (toons-up) the avatar who grabs it."""

    def __init__(self, air, treasurePlanner, x, y, z):
        DistributedTreasureAI.DistributedTreasureAI.__init__(self, air, treasurePlanner, x, y, z)
        # Heal amount comes from the planner that spawned this treasure.
        self.healAmount = treasurePlanner.healAmount

    def validAvatar(self, av):
        # Only avatars that are alive and not already at full HP benefit.
        return 0 < av.hp < av.maxHp

    def d_setGrab(self, avId):
        DistributedTreasureAI.DistributedTreasureAI.d_setGrab(self, avId)
        # .get()/'in' instead of the Python-2-only dict.has_key().
        av = self.air.doId2do.get(avId)
        if av is not None and self.validAvatar(av):
            # NOTE(review): simbase is an engine-injected global — confirm.
            if ToontownGlobals.VALENTINES_DAY in simbase.air.holidayManager.currentHolidays:
                # Double healing during the Valentine's Day holiday.
                av.toonUp(self.healAmount * 2)
            else:
                av.toonUp(self.healAmount)
|
""" Get all preassembled statements, prune away unneeded information, pickle
them. """
import sys
import pickle
from delphi.utils.indra import get_statements_from_json_file
if __name__ == "__main__":
    # argv[1]: preassembled-statements JSON; argv[2]: output pickle path.
    source_json, target_pickle = sys.argv[1], sys.argv[2]
    statements = get_statements_from_json_file(source_json)
    with open(target_pickle, "wb") as out:
        pickle.dump(statements, out)
|
# Per-alias Elasticsearch connection settings; "default" ships empty and is
# meant to be overridden in deployment settings.
ELASTICSEARCH = {
    "default": {}
}
"""
Elasticsearch server settings, see `elasticsearch-py`_ docs for all available options

.. _elasticsearch-py: https://elasticsearch-py.readthedocs.io/en/master/index.html

Example settings::

    # With security and via HTTPS using RFC-1738 URL
    ELASTICSEARCH = {
        "default": {"hosts": ["https://user:secret@host1:443"]},
    }

    # - OR -

    # With security and via HTTPS (long form)
    ELASTICSEARCH = {
        "default": {
            "hosts": ["host1", "host2"],
            "http_auth": ("user", "secret"),
            "scheme": "https",
            "port": 443,
        },
    }
"""
|
# python3
# -*- coding: utf-8 -*-
# @Time : 2022/1/16 22:21
# @Author : yzyyz
# @Email : youzyyz1384@qq.com
# @File : word_analyze.py
# @Software: PyCharm
from nonebot import on_command, logger, on_message
from nonebot.adapters.onebot.v11 import Bot, GroupMessageEvent, MessageSegment
from nonebot.adapters.onebot.v11.permission import GROUP_ADMIN, GROUP_OWNER
from nonebot.permission import SUPERUSER
from .utils import init, replace_tmr, participle_simple_handle, check_func_status
from pathlib import Path
import os
from .path import *
word_start = on_command("记录本群", block=True, priority=1, permission=GROUP_ADMIN | GROUP_OWNER | SUPERUSER)


@word_start.handle()
async def _(bot: Bot, event: GroupMessageEvent):
    """Start recording this group's messages for the word cloud."""
    gid = str(event.group_id)
    status = await check_func_status("wordcloud", gid)
    if status:
        if not os.path.exists(word_path):
            await init()
        with open(word_path, 'r+', encoding='utf-8') as c:
            txt = c.read().split("\n")
            if gid not in txt:
                # 'r+' leaves the cursor at EOF after read(), so this appends.
                c.write(gid + "\n")
                # (removed redundant c.close(): the with-block closes the file,
                # even though finish() raises to end the handler)
                logger.info(f"开始记录{gid}")
                await word_start.finish("成功")
            else:
                logger.info(f"{gid}已存在")
                await word_start.finish(f"{gid}已存在")
    else:
        await word_start.finish("请先发送【开关群词云】开启此功能")
word_stop = on_command("停止记录本群", block=True, priority=1, permission=GROUP_ADMIN | GROUP_OWNER | SUPERUSER)


@word_stop.handle()
async def _(bot: Bot, event: GroupMessageEvent):
    """Stop recording this group; previously collected text is kept."""
    gid = str(event.group_id)
    status = await check_func_status("wordcloud", gid)
    if status:
        if not os.path.exists(word_path):
            await init()
        # Close the read handle (previously leaked via open(...).read()).
        with open(word_path, 'r', encoding='utf-8') as f:
            txt = f.read()
        if gid in txt:
            with open(word_path, 'w', encoding='utf-8') as c:
                # NOTE(review): substring replace could also clip a longer
                # group id containing this gid — consider line-based removal.
                c.write(txt.replace(gid, ""))
            logger.info(f"停止记录{gid}")
            # Bug fix: this handler belongs to word_stop, not word_start.
            await word_stop.finish("成功,曾经的记录不会被删除")
        else:
            logger.info(f"停用失败:{gid}不存在")
            await word_stop.finish(f"停用失败:{gid}不存在")
    else:
        await word_stop.finish("请先发送【开关群词云】开启此功能")
word = on_message(priority=10, block=False)


@word.handle()
async def _(bot: Bot, event: GroupMessageEvent):
    """
    记录聊天内容 (append this group's chat text to its word-cloud corpus)

    :param bot:
    :param event:
    :return:
    """
    gid = str(event.group_id)
    msg = str(event.get_message()).replace(" ", "")
    path_temp = words_contents_path / f"{gid}.txt"
    if not os.path.exists(word_path):
        await init()
    # Close the registry file handle (previously leaked via open(...).read()).
    with open(word_path, "r", encoding="utf-8") as f:
        txt = f.read().split("\n")
    if gid in txt:
        msg = await replace_tmr(msg)
        # Write UTF-8 explicitly so the corpus does not depend on the locale.
        with open(path_temp, "a+", encoding="utf-8") as c:
            c.write(msg + "\n")
cloud = on_command("群词云", priority=1)


@cloud.handle()
async def _(bot: Bot, event: GroupMessageEvent):
    """Render this group's recorded text as a word-cloud image and send it."""
    from wordcloud import WordCloud
    import jieba
    ttf_name_ = Path() / "resource" / "msyhblod.ttf"
    gid = str(event.group_id)
    path_temp = words_contents_path / f"{gid}.txt"
    dir_list = os.listdir(words_contents_path)
    status = await check_func_status("wordcloud", gid)
    if status:
        if gid + ".txt" in dir_list:
            # Close the handle and decode UTF-8 explicitly
            # (was a leaked open(path_temp).read() with locale encoding).
            with open(path_temp, encoding="utf-8") as f:
                text = f.read()
            txt = jieba.lcut(text)
            stop_ = await participle_simple_handle()
            string = " ".join(txt)
            try:
                wc = WordCloud(font_path=str(ttf_name_.resolve()), width=800, height=600, mode='RGBA',
                               background_color="#ffffff", stopwords=stop_).generate(string)
                img = Path(re_img_path / f"{gid}.png")
                wc.to_file(img)
                await cloud.send(MessageSegment.image(img))
            except Exception as err:
                await cloud.send(f"出现错误{type(err)}:{err}")
    else:
        await cloud.send("请先发送【开关群词云】开启此功能")
|
# -*- coding: utf-8 -*-
# pylint: disable=wildcard-import
from __future__ import absolute_import
from .dataset import *
from .datasets import *
from .dataloader import *
from . import transforms
from .stratified_sampler import StratifiedSampler
from . import utils
|
#--
# Copyright (c) 2012-2014 Net-ng.
# All rights reserved.
#
# This software is licensed under the BSD License, as described in
# the file LICENSE.txt, which you should have received as part of
# this distribution.
#--
"""adding max cards
Revision ID: b740362087
Revises: 537fa16b46e7
Create Date: 2013-09-19 17:37:37.027495
"""
# revision identifiers, used by Alembic.
revision = 'b740362087'
down_revision = '537fa16b46e7'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Add the nullable integer column nb_max_cards to table 'column'."""
    nb_max_cards = sa.Column('nb_max_cards', sa.Integer)
    op.add_column('column', nb_max_cards)
def downgrade():
    """Revert upgrade(): drop nb_max_cards from table 'column'."""
    op.drop_column('column', 'nb_max_cards')
|
#!/usr/bin/env python3
# Append GPL3 license notice to the top of an existing file.
# Note: If you just trying to append to an empty file, just use
# echo "data" | cat >> empty_file.txt
# Usage:
# cap <file>
#
import sys
# License header to prepend (C-style block comment).
gpl_license = '''/* -Insert project name and what it does-
Copyright (C) 2018 faraco <skelic3@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
'''

# Fail with a usage message instead of an IndexError when no file is given.
if len(sys.argv) != 2:
    sys.exit("Usage: cap <file>")

# 'r+' lets us read the current contents, rewind, and overwrite in place,
# so the license ends up above the original text.
with open(sys.argv[1], 'r+') as f:
    content = f.read()
    f.seek(0, 0)
    f.write(gpl_license.rstrip('\r\n') + '\n' + content)
|
def main_report():
    """Demo helper: announce that this function lives in mainscript.py."""
    message = "Hey I am a function inside mainscript.py"
    print(message)
# Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
import unittest
from vdk.internal.builtin_plugins.job_properties.datajobs_service_properties import (
DataJobsServiceProperties,
)
from vdk.internal.builtin_plugins.job_properties.inmemproperties import (
InMemPropertiesServiceClient,
)
class DataJobsServicePropertiesTest(unittest.TestCase):
    """Validate key/value rules enforced by DataJobsServiceProperties."""

    def setUp(self) -> None:
        self.data_jobs_service_props = DataJobsServiceProperties(
            "foo", "test-team", InMemPropertiesServiceClient()
        )

    def test_data_jobs_service_properties_not_allowed_keys(self):
        # Keys with extra/inner whitespace or a non-string key are rejected.
        for bad_key in (" key_with_extra_spaces", "key with spaces", 1234):
            with self.assertRaises(Exception):
                self.data_jobs_service_props.set_all_properties({bad_key: "value"})

    def test_data_jobs_service_properties_allowed_keys(self):
        # Letters, digits, dots, underscores and dashes are all accepted.
        for good_key in ("words", "123", "a.dot", "a_underscore", "a-dash"):
            self.data_jobs_service_props.set_all_properties({good_key: "value"})

    def test_data_jobs_service_properties_not_allowed_values(self):
        # Arbitrary objects and byte arrays are not serializable values.
        for bad_value in (object(), bytearray()):
            with self.assertRaises(Exception):
                self.data_jobs_service_props.set_all_properties({"key": bad_value})

    def test_data_jobs_service_properties_allowed_values(self):
        # Numbers, strings, None and dicts are accepted.
        for good_value in (1, "str", 3.14, "unicode", r"raw", None, dict()):
            self.data_jobs_service_props.set_all_properties({"key": good_value})

    def test_data_jobs_service_properties(self):
        props = self.data_jobs_service_props
        self.assertIsNone(props.get_property("nonexistentproperty"))
        self.assertEqual(
            props.get_property("nonexistentproperty", "default value"),
            "default value",
        )
        props.set_all_properties({"key1": "value1", "key2": dict()})
        self.assertEqual(props.get_property("key1"), "value1")
        self.assertEqual(props.get_property("key2"), dict())
        self.assertEqual(
            props.get_all_properties(),
            {"key1": "value1", "key2": dict()},
        )
|
# -*- coding: utf-8 -*-
import unittest
from hrt.base import AbstractScript
from hrt import script
from .templates import (code_begin_python, code_search_python, code_python, code_post_python, code_search_ruby,
code_begin_ruby, code_ruby, code_post_ruby, code_begin_bash, code_search_bash, code_bash,
code_post_bash, code_search_php, code_php, code_begin_php, code_post_php)
class TestScripts(unittest.TestCase):
    """Run every AbstractScript subclass through the same generation checks.

    Each test iterates over self.script_list (one instance per registered
    AbstractScript subclass); the expected outputs are module-level
    ``code_*_<language>`` templates looked up via ``globals()``.
    """

    def setUp(self):
        # Base fixture: a GET request to google.com, including proxy settings.
        self.headers = ['Host: google.com']
        self.details = {
            'protocol': 'HTTP',
            'pre_scheme': 'https://',
            'Host': 'google.com',
            'version': '1.1',
            'path': '/robots.txt',
            'method': 'GET',
            'proxy_port': '2223',
            'proxy_host': 'http://xyz.com'}
        # Second fixture: a POST request with a body.
        self.second_headers = ['Host: www.codepunker.com']
        self.second_details = {
            'protocol': 'HTTP',
            'pre_scheme': 'https://',
            'Host': 'www.codepunker.com',
            'version': '1.1',
            'path': '/tools/http-requests',
            'method': 'POST',
            'data': 'extra=whoAreYou'
        }
        # Deliberately quote-heavy payload to exercise per-language escaping.
        self.code_search = """hello3131\"you\\"are'awesome"""
        self.script_list = []
        for script_class in AbstractScript.__subclasses__():
            self.script_list.append(script_class(headers=self.headers, details=self.details))

    def test_generate_search(self):
        # Search snippet must match the code_search_<lang> template.
        for script_name in self.script_list:
            result = script_name._generate_search(self.code_search)
            self.assertEqual(
                result,
                globals()["code_search_" + script_name.__language__],
                'Invalid generation of search code for {}'.format(script_name.__class__.__name__))

    def test_generate_proxy(self):
        # Expected proxy snippet per language, built from proxy_* details.
        code_proxy = {
            'bash': " -x http://xyz.com:2223",
            'php': "\ncurl_setopt($ch, CURLOPT_PROXY, 'http://xyz.com:2223');\n",
            'python': "\n curl_handler.setopt(curl_handler.PROXY, 'http://xyz.com:2223')\n",
            'ruby': "\n proxy: 'http://xyz.com:2223',\n"}
        for script_name in self.script_list:
            result = script_name._generate_proxy()
            self.assertEqual(
                result,
                code_proxy[script_name.__language__],
                'Invalid generation of proxy code for {}'.format(script_name.__class__.__name__))

    def test_generate_script(self):
        # Full GET script must match the code_<lang> template.
        for script_name in self.script_list:
            result = script_name.generate_script()
            self.assertEqual(
                result,
                globals()["code_" + script_name.__language__],
                'Invalid generation of GET script for {}'.format(script_name.__class__.__name__))

    def test_post_generate_script(self):
        # Regenerating with the POST fixture must match code_post_<lang>.
        for script_name in self.script_list:
            script_name.url = ''
            result = script_name.generate_script(headers=self.second_headers, details=self.second_details)
            self.assertEqual(
                result,
                globals()["code_post_" + script_name.__language__],
                'Invalid generation of POST script for {}'.format(script_name.__class__.__name__))

    def test_generate_post(self):
        # Expected POST-body snippet per language for a unicode/symbol payload.
        code_post = {
            'bash': ' --data "hello7World\'Ω≈ç√∫˜µ≤≥÷田中さんにあげて下さい,./;[]\-=<>?:\\"{}|_+!@#$%^&*()`" ',
            'php': '\n$content = "hello7World\'Ω≈ç√∫˜µ≤≥÷田中さんにあげて下さい,./;[]\-=<>?:\\"{}|_+!@#$%^&*()`";\ncurl_setopt($ch, CURLOPT_POST, 1);\ncurl_setopt($ch, CURLOPT_POSTFIELDS, $content);\n',
            'python': '\n # Sets request method to POST\n curl_handler.setopt(curl_handler.POSTFIELDS, "hello7World\'Ω≈ç√∫˜µ≤≥÷田中さんにあげて下さい,./;[]\-=<>?:\\"{}|_+!@#$%^&*()`") #expects body to urlencoded\n',
            'ruby': '\n body: "hello7World\'Ω≈ç√∫˜µ≤≥÷田中さんにあげて下さい,./;[]\-=<>?:\\"{}|_+!@#$%^&*()`"\n'}
        self.details['data'] = 'hello7World\'Ω≈ç√∫˜µ≤≥÷田中さんにあげて下さい,./;[]\-=<>?:"{}|_+!@#$%^&*()`'
        for script_name in self.script_list:
            result = script_name._generate_post()
            self.assertEqual(
                result,
                code_post[script_name.__language__],
                'Invalid generation of post code for {}'.format(script_name.__class__.__name__))

    def test_generate_begin(self):
        # Script preamble must match the code_begin_<lang> template.
        for script_name in self.script_list:
            result = script_name._generate_begin()
            self.assertEqual(
                result,
                globals()["code_begin_" + script_name.__language__],
                'Invalid generation of begin code for {}'.format(script_name.__class__.__name__))

    def test_create_url(self):
        # An invalid host must make create_url raise ValueError.
        for script_name in self.script_list:
            script_name.details['Host'] = 'wrongurl..'
            with self.assertRaises(ValueError):
                script_name.create_url()

    def test_encode_url(self):
        # Query-ish data in the path must be percent-encoded.
        for script_name in self.script_list:
            script_name.details['data'] = "?xx"
            result = script_name.encode_url(script_name.url)
            self.assertEqual(
                result,
                'https://google.com/robots.txt%3Fxx',
                'Invalid generation of begin code for {}'.format(script_name.__class__.__name__))
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
|
from . import io
from . import local_config
from . import ndarray
from . import misc
from . import plots
|
import psycopg2
## connect to the db
host = "localhost"
db = "stock_selector_db"
user = "postgres"
# NOTE(review): hardcoded credentials — move to environment/config.
pw = "123"
conn = psycopg2.connect(
    host=host,
    database=db,
    user=user,
    password=pw)
cur = conn.cursor()
try:
    ## insert data: prompt until a new index name is committed
    while True:
        index = input("what index do you want to register?\n")
        cur.execute('''
        SELECT "name" FROM indices WHERE "name" = %s''', (index,))
        # Explicit None check instead of the old fetchone()[0]/TypeError trick.
        if cur.fetchone() is not None:
            print("this index is already on the database")
            continue
        print("are you sure you want to add this index to the database\n", index)
        answer = input("(y/n)\n")
        if answer == 'y':
            # Parameterized query — safe against SQL injection.
            cur.execute('''
            INSERT INTO indices ("name") VALUES (%s);
            ''', (index,))
            break
    ## save the data in the database
    conn.commit()
finally:
    ## close the connection even if something above raised
    cur.close()
    conn.close()
print('done')
#! /usr/bin/env python3
"""
Chain of joltage adapters
"""
from pathlib import Path
import collections
from rich import print
import rich.traceback
rich.traceback.install()
TEST = False
# TEST = True
if TEST:
    p = Path(__file__).with_name('day10part1-sample.txt')
    expected_result_for_1 = 22  # NOTE(review): defined but never checked below
    expected_result_for_3 = 10
else:
    p = Path(__file__).with_name('day10part1-input.txt')

# One adapter joltage per non-blank line.
with p.open('r') as f:
    adapters = [int(l) for l in f.readlines() if l.strip()]
adapters.append(0)  # This represents the charging port
adapters.sort()
adapters.append(adapters[-1]+3)  # This represents the device being charged

# Step through each adapter and calculate the total number of combinations for getting to that
# adapter's joltage. This is equal to
#   the number of ways to get to (this joltage-3) [note this is 0 if no adapter exists for that joltage]
# + the number of ways to get to (this joltage-2) [note this is 0 if no adapter exists for that joltage]
# + the number of ways to get to (this joltage-1) [same note as above]
#
# combinations is a dictionary where the keys are the adapter joltages and the values are the number
# of combinations of adapters that can reach that joltage.
combinations = {0:1}  # We always start at 0 jolts, so there is 1 combination for that joltage
for j in adapters[1:]:
    combinations[j] = combinations.get(j-3, 0) + combinations.get(j-2, 0) + combinations.get(j-1, 0)
print(f'Number of combinations to reach {adapters[-1]} jolts is {combinations[adapters[-1]]}')
|
"""
The :py:mod:`training` module provides a generic continual learning training
class (:py:class:`BaseStrategy`) and implementations of the most common
CL strategies. These are provided either as standalone strategies in
:py:mod:`training.strategies` or as plugins (:py:mod:`training.plugins`) that
can be easily combined with your own strategy.
"""
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020 Érik Martin-Dorel
#
# Contributed under the terms of the MIT license,
# cf. <https://spdx.org/licenses/MIT.html>
from bash_formatter import BashLike
from datetime import datetime
import base64
import copy
import json
import requests
import os
import sys
import time
import yaml
debug = False  # set True to dump intermediate build data to stderr
output_directory = 'generated'  # every artifact is written below this directory
images_filename = 'images.yml'  # YAML specification consumed by load_spec()
json_indent = 2  # indentation used for all JSON artifacts
upstream_project = 'erikmd/docker-keeper'  # upstream GitLab project path
upstream_url = 'https://gitlab.com/%s' % upstream_project
def print_stderr(message):
    """Write *message* (plus newline) to stderr and flush immediately."""
    sys.stderr.write('%s\n' % message)
    sys.stderr.flush()
def dump(data):
    """Debug helper: pretty-print *data* as indented JSON on stderr."""
    rendered = json.dumps(data, indent=json_indent)
    print_stderr(rendered)
# def error(msg, flush=True):
# print(msg, file=sys.stderr, flush=flush)
# exit(1)
class Error(Exception):
    """Base class for exceptions in this module."""


def error(msg):
    """Abort the current operation by raising a module-level Error."""
    raise Error(msg)
def uniqify(s):
    """Return the distinct elements of *s*, sorted ascending."""
    distinct = set(s)
    return sorted(distinct)
def diff_list(l1, l2):
    """Set-difference (l1 - l2), preserving l1's order and duplicates."""
    return [e for e in l1 if e not in l2]
def meet_list(l1, l2):
    """Sublist of l1 whose elements also appear in l2."""
    return [e for e in l1 if e in l2]
def subset_list(l1, l2):
    """Check if every element of l1 occurs in l2 (l1 included in l2)."""
    return all(e in l2 for e in l1)
def is_unique(s):
    """True when the list *s* contains no duplicate element."""
    return len(set(s)) == len(s)
def merge_dict(a, b):
    """Merge the fields of a and b, the latter overriding the former.

    Either argument may be falsy (e.g. None); inputs are deep-copied so the
    result shares no structure with them.
    """
    res = copy.deepcopy(a) if a else {}
    res.update(copy.deepcopy(b) if b else {})
    return res
def check_string(value, ident=None):
    """Abort via error() unless *value* is a str; *ident* names the field."""
    if isinstance(value, str):
        return
    if ident:
        error("Error: expecting a string value, but was given '%s: %s'."
              % (ident, value))
    else:
        error("Error: expecting a string value, but was given '%s'."
              % value)
def check_list(value, text=None):
    """Abort via error() unless *value* is a list; *text* overrides the dump."""
    if isinstance(value, list):
        return
    if not text:
        text = str(value)
    error("Error: not (JSON) list\nText: %s"
          % text)
def check_dict(value, text=None):
    """Abort via error() unless *value* is a dict; *text* overrides the dump."""
    if isinstance(value, dict):
        return
    if not text:
        text = str(value)
    error("Error: not (JSON) dict\nText: %s"
          % text)
def eval_bashlike(template, matrix, defaults=None):
    """Expand *template* with Bash-like {matrix[...]} substitutions."""
    formatter = BashLike()
    return formatter.format(template, matrix=matrix, defaults=defaults)
def get_build_date():
    """ISO 8601 UTC timestamp, e.g. '2020-01-31T12:00:00Z'."""
    now = datetime.utcnow()
    return now.strftime("%FT%TZ")
def naive_url_encode(name):
    """Percent-encode '/' only (GitLab namespaced-path encoding).

    https://gitlab.com/help/api/README.md#namespaced-path-encoding
    """
    check_string(name)
    encoded = name.replace('/', '%2F')
    return encoded
def gitlab_lambda_query_sha1(response):
    """Extract the branch head SHA1 ("commit.id") from 'response.json()'."""
    payload = response.json()
    return payload['commit']['id']
def lambda_query_text(response):
    """Return the raw response body as text."""
    body = response.text
    return body
def get_url(url, headers=None, params=None, lambda_query=(lambda r: r)):
    """GET *url* and map the response through *lambda_query*.

    Some examples of lambda_query:
    - gitlab_lambda_query_sha1
    - lambda_query_text
    Aborts via error() on a non-success response.
    """
    print_stderr('GET %s\n' % url)
    response = requests.get(url, headers=headers, params=params)
    if response:
        return lambda_query(response)
    error("Error!\nCode: %d\nText: %s"
          % (response.status_code, response.text))
def get_commit(commit_api):
    """Get GitHub or GitLab SHA1 of a given branch.

    commit_api keys: 'fetcher' ('github' or 'gitlab'), 'repo', 'branch'.
    """
    fetcher = commit_api['fetcher']
    repo = commit_api['repo']
    branch = commit_api['branch']
    if fetcher == 'github':
        url = 'https://api.github.com/repos/%s/commits/%s' % (repo, branch)
        return get_url(url, {"Accept": "application/vnd.github.v3.sha"},
                       None, lambda_query_text)
    if fetcher == 'gitlab':
        # https://gitlab.com/help/api/branches.md#get-single-repository-branch
        url = ('https://gitlab.com/api/v4/projects/%s/repository/branches/%s'
               % (naive_url_encode(repo), naive_url_encode(branch)))
        return get_url(url, None, None, gitlab_lambda_query_sha1)
    error("Error: do not support 'fetcher: %s'" % fetcher)
def load_spec():
    """Parse the YAML spec file and return a dict; exit(1) unless 'active'."""
    print_stderr("Loading '%s'..." % images_filename)
    with open(images_filename) as f:
        spec = yaml.safe_load(f)
    if not spec.get('active'):
        print_stderr("""
WARNING: the 'docker-keeper' tasks are not yet active.
Please update your %s specification and Dockerfile templates.
Then, set the option 'active: true' in the %s file."""
                     % (images_filename, images_filename))
        exit(1)
    return spec
def product_build_matrix(matrix):
    """Cartesian product of the matrix values: list of dicts, one value per key."""
    assert matrix
    combos = [{}]
    for key, values in matrix.items():
        expanded = []
        for value in values:
            for combo in combos:
                grown = copy.deepcopy(combo)
                grown[key] = value
                expanded.append(grown)
        combos = expanded
    return combos
def check_trim_relative_path(path):
    """Fail if *path* is absolute; return it with any leading './' removed."""
    check_string(path)
    if path[0] == '/':
        error("Error: expecting a relative path, but was given '%s'." % path)
    # Normalize './foo' to 'foo'; anything else passes through unchanged.
    return path[2:] if path[:2] == './' else path
def check_filename(filename):
    """Abort via error() if *filename* contains a path separator."""
    check_string(filename)
    if '/' in filename:
        error("Error: expecting a filename, but was given '%s'." % filename)
def eval_if(raw_condition, matrix):
    """Evaluate YAML condition.

    Supported forms:
        '{matrix[key]} == "string"'
        '{matrix[key]} != "string"'
        '"{matrix[key]}" == "string"'
        '"{matrix[key]}" != "string"'
    A list is treated as a conjunction; None evaluates to True.
    """
    # Conjunction
    if isinstance(raw_condition, list):
        for item_condition in raw_condition:
            e = eval_if(item_condition, matrix)
            if not e:
                return False
        return True
    elif raw_condition is None:
        return True
    check_string(raw_condition)
    equality = (raw_condition.find("==") > -1)
    inequality = (raw_condition.find("!=") > -1)
    # NOTE: if a condition contains both '==' and '!=', '==' takes precedence.
    if equality:
        args = raw_condition.split("==")
    elif inequality:
        args = raw_condition.split("!=")
    else:
        error("Unsupported condition: '%s'." % raw_condition)
    if len(args) != 2:
        error("Wrong number of arguments: '%s'." % raw_condition)
    # Strip whitespace and double quotes before Bash-like interpolation.
    a = eval_bashlike(args[0].strip().replace('"', ''), matrix)
    b = eval_bashlike(args[1].strip().replace('"', ''), matrix)
    if equality:
        return a == b
    else:
        return a != b
def get_list_dict_dockerfile_matrix_tags_args(json):
    """Get list of dicts containing the following keys:
    - "context": "…"
    - "dockerfile": "…/Dockerfile"
    - "path": "…/…/Dockerfile"
    - "matrix": […]
    - "tags": […]
    - "args": […]
    - "keywords": […]
    - "after_deploy_script": […]

    NOTE(review): the parameter name 'json' shadows the stdlib json module
    within this function.
    """
    # TODO later-on: fix (dockerfile / path) semantics
    res = []
    images = json['images']
    for item in images:
        # Expand the per-image build matrix into concrete combinations.
        list_matrix = product_build_matrix(item['matrix'])
        if 'dockerfile' in item['build']:
            dfile = check_trim_relative_path(item['build']['dockerfile'])
        else:
            dfile = 'Dockerfile'
        context = check_trim_relative_path(item['build']['context'])
        path = '%s/%s' % (context, dfile)
        raw_tags = item['build']['tags']
        # Global args may be overridden per-image.
        args1 = json['args'] if 'args' in json else {}
        args2 = item['build']['args'] if 'args' in item['build'] else {}
        raw_args = merge_dict(args1, args2)
        if 'keywords' in item['build']:
            raw_keywords = item['build']['keywords']
        else:
            raw_keywords = []
        if 'after_deploy' in item['build']:
            raw_after_deploy = item['build']['after_deploy']
            # support both
            #   after_deploy: 'code'
            # and
            #   after_deploy:
            #     - 'code'
            # as well as
            #   after_deploy:
            #     - run: 'code'
            #       if: '{matrix[base]} == 4.07.1-flambda'
            # and regarding interpolation, we can add:
            #   after_deploy_export:
            #     variable_name: 'value-{matrix[coq]}'
            # to prepend the after_deploy_script with export commands
            if isinstance(raw_after_deploy, str):
                raw_after_deploy = [raw_after_deploy]
        else:
            raw_after_deploy = []
        if 'after_deploy_export' in item['build']:
            raw_after_deploy_export = item['build']['after_deploy_export']
            check_dict(raw_after_deploy_export)
        else:
            raw_after_deploy_export = {}
        for matrix in list_matrix:
            tags = []
            for tag_item in raw_tags:
                tag_template = tag_item['tag']
                tag_cond = tag_item['if'] if 'if' in tag_item else None
                if eval_if(tag_cond, matrix):
                    # otherwise skip the tag synonym
                    tag = eval_bashlike(tag_template, matrix)  # & defaults ?
                    tags.append(tag)
            defaults = {"build_date": get_build_date()}
            if 'commit_api' in item['build']:
                commit_api = item['build']['commit_api']
                defaults['commit'] = get_commit(commit_api)
            args = {}
            for arg_key in raw_args:
                arg_template = raw_args[arg_key]
                args[arg_key] = eval_bashlike(arg_template, matrix, defaults)
            keywords = list(map(lambda k: eval_bashlike(k, matrix, defaults),
                                raw_keywords))
            after_deploy_export = []
            # Note: This could be a map:
            for var in raw_after_deploy_export:
                check_string(var)
                var_template = raw_after_deploy_export[var]
                var_value = eval_bashlike(var_template, matrix, defaults)
                # TODO soon: think about quoting var_value
                after_deploy_export.append("export %s='%s'" % (var, var_value))
            # Export lines are only prepended when there is a deploy script.
            if raw_after_deploy:
                after_deploy_script = after_deploy_export
            else:
                after_deploy_script = []
            for ad_item in raw_after_deploy:
                if isinstance(ad_item, str):
                    after_deploy_script.append(ad_item)  # no { } interpolation
                    # otherwise sth like ${BASH_VARIABLE} would raise an error
                else:
                    script_item = ad_item['run']
                    script_cond = ad_item['if'] if 'if' in ad_item else None
                    if eval_if(script_cond, matrix):
                        # otherwise skip the script item
                        after_deploy_script.append(script_item)
            newitem = {"context": context, "dockerfile": dfile,
                       "path": path,
                       "matrix": matrix, "tags": tags, "args": args,
                       "keywords": keywords,
                       "after_deploy_script": after_deploy_script}
            res.append(newitem)
    if debug:
        dump(res)
    return res
def gitlab_build_params_pagination(page, per_page):
    """Query params for GitLab pagination.

    https://docs.gitlab.com/ce/api/README.html#pagination
    """
    return {'page': str(page), 'per_page': str(per_page)}
def hub_build_params_pagination(page, per_page):
    """Query params for Docker Hub pagination (page / page_size)."""
    return {'page': str(page), 'page_size': str(per_page)}
def hub_lambda_list(j):
    """Tag names from one Docker Hub 'repositories/…/tags' JSON page."""
    return [entry['name'] for entry in j['results']]
def get_list_paginated(url, headers, params, lambda_list, max_per_sec=5):
    """Generic wrapper to handle GET requests with pagination.

    If the response is a JSON list, use lambda_list=(lambda l: l).
    REM: for https://registry.hub.docker.com/v2/repositories/_/_/tags,
    one could use the "next" field to guess the following page.
    """
    assert isinstance(max_per_sec, int)
    assert 0 < max_per_sec <= 10
    per_page = 50  # max allowed (by gitlab.com & hub.docker.com): 100
    page = 0
    collected = []
    while True:
        page += 1
        # Crude rate limiting: pause every max_per_sec pages.
        if page % max_per_sec == 0:
            time.sleep(1.1)
        all_params = merge_dict(params, hub_build_params_pagination(page, per_page))
        print("GET %s\n # page: %d"
              % (url, page), file=sys.stderr, flush=True)
        response = requests.get(url, headers=headers, params=all_params)
        if not response:
            error("Error!\nCode: %d\nText: %s"
                  % (response.status_code, response.text))
        chunk = lambda_list(response.json())
        check_list(chunk, text=response.text)
        if not chunk:
            break
        collected += chunk
    return collected
def get_remote_tags(spec):
    """All tags currently published on Docker Hub for spec['docker_repo']."""
    repo = spec['docker_repo']
    check_string(repo)
    url = 'https://registry.hub.docker.com/v2/repositories/%s/tags' % repo
    return get_list_paginated(url, None, None, hub_lambda_list)
def minimal_rebuild(build_tags, remote_tags):
    """Keep only the items having at least one tag missing from the remote."""
    def needs_build(item):
        return not subset_list(item['tags'], remote_tags)
    return [item for item in build_tags if needs_build(item)]
def to_rm(all_tags, remote_tags):
    """Remote tags that no longer correspond to any local tag."""
    return diff_list(remote_tags, all_tags)
def get_script_directory():
    """Directory containing this script.

    Shell equivalent: $(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd).
    """
    return os.path.dirname(__file__)
def mkdir_dirname(filename):
    """Python3 equivalent to 'mkdir -p $(dirname $filename)'."""
    parent = os.path.dirname(filename)
    os.makedirs(parent, mode=0o755, exist_ok=True)
def fullpath(filename):
    """Path of *filename* inside the output directory."""
    return os.path.join(output_directory, filename)
def write_json_artifact(j, basename):
    """Serialize *j* as indented JSON into output_directory/basename."""
    filename = fullpath(basename)
    print_stderr("Generating '%s'..." % filename)
    mkdir_dirname(filename)
    with open(filename, 'w') as out:
        json.dump(j, out, indent=json_indent)
def write_text_artifact(text, basename):
    """Write *text* verbatim into output_directory/basename."""
    filename = fullpath(basename)
    print_stderr("Generating '%s'..." % filename)
    mkdir_dirname(filename)
    with open(filename, 'w') as out:
        out.write(text)


def write_list_text_artifact(seq, basename):
    """Write the list *seq* as newline-terminated lines."""
    check_list(seq)
    write_text_artifact('\n'.join(seq) + '\n', basename)
def write_build_data_all(build_data_all):
    """Artifact: every build candidate."""
    write_json_artifact(build_data_all, 'build_data_all.json')


def write_build_data_chosen(build_data):
    """Artifact: the builds selected for this run."""
    write_json_artifact(build_data, 'build_data_chosen.json')


def write_build_data_min(build_data_min):
    """Artifact: the minimal rebuild set."""
    write_json_artifact(build_data_min, 'build_data_min.json')


def write_remote_tags(remote_tags):
    """Artifact: tags currently published on the remote registry."""
    write_list_text_artifact(remote_tags, 'remote_tags.txt')


def write_remote_tags_to_rm(remote_tags_to_rm):
    """Artifact: remote tags scheduled for removal."""
    write_json_artifact(remote_tags_to_rm, 'remote_tags_to_rm.json')
def write_list_dockerfile(seq):
    """To be used on the value of get_list_dict_dockerfile_matrix_tags_args."""
    paths = uniqify(e['path'] for e in seq)
    write_list_text_artifact(paths, 'Dockerfiles.txt')
def write_docker_repo(spec):
    """Artifact: the target Docker repository name (newline-terminated)."""
    write_text_artifact(spec['docker_repo'] + '\n', 'docker_repo.txt')
def read_json_artifact(basename):
    """Load a previously generated JSON artifact."""
    filename = fullpath(basename)
    print_stderr("Reading '%s'..." % filename)
    with open(filename, 'r') as handle:
        return json.load(handle)


def read_build_data_chosen():
    """Load the builds selected by the previous pipeline stage."""
    return read_json_artifact('build_data_chosen.json')
def write_readme(base_url, build_data):
    """Read README.md and replace <!-- tags --> with a list of images
    with https://gitlab.com/foo/bar/blob/master/Dockerfile hyperlinks.
    """
    pattern = '<!-- tags -->'
    check_string(base_url)
    # Drop a single trailing slash so URLs join cleanly.
    if base_url[-1] == '/':
        base_url = base_url[:-1]

    def image_line(item):
        return '- [`{tags}`]({url})'.format(
            tags=('`, `'.join(item['tags'])),
            url=('%s/blob/master/%s' % (base_url, item['path'])))

    print_stderr("Reading the template 'README.md'...")
    with open('README.md', 'r') as tpl:
        template = tpl.read()
    tags = ('# <a name="supported-tags"></a>'
            'Supported tags and respective `Dockerfile` links\n\n%s'
            % '\n'.join(map(image_line, build_data)))
    readme = template.replace(pattern, tags)
    filename = fullpath('README.md')
    print_stderr("Generating '%s'..." % filename)
    mkdir_dirname(filename)
    with open(filename, 'w') as out:
        out.write(readme)
def get_check_tags(seq):
    """To be used on the value of get_list_dict_dockerfile_matrix_tags_args.

    Collect every tag; abort via error() if any tag occurs twice.
    """
    all_tags = []
    for item in seq:
        all_tags.extend(item['tags'])
    if is_unique(all_tags):
        print_stderr("OK: no duplicate tag found.")
    else:
        error("Error: there are some tags duplicates.")
    return all_tags
def merge_data(l1, l2):
    """Append to l1 the elements of l2 that do not belong to l1."""
    return l1 + diff_list(l2, l1)
def get_nightly_only(spec):
    """Restrict the spec to images flagged build.nightly, then expand it."""
    spec2 = copy.deepcopy(spec)
    images = spec2.pop('images')
    # Truthiness matches the old "'nightly' in build and build['nightly']".
    spec2['images'] = [it for it in images if it['build'].get('nightly')]
    return get_list_dict_dockerfile_matrix_tags_args(spec2)
def print_list(title, seq):
    """Print *title* followed by one '- item' bullet line per element."""
    bullets = ''.join('\n- ' + e for e in seq)
    print(title + ':' + bullets)
def get_files_only(build_data_all, items_filename):
    """Keep only the items whose Dockerfile path is listed in the given file."""
    with open(items_filename, 'r') as fh:
        wanted = [line.strip() for line in fh.readlines()]
    print_list('Specified Dockerfiles:', wanted)
    # TODO later-on: fix (dockerfile / path) semantics
    return [item for item in build_data_all if item['path'] in wanted]
def get_tags_only(build_data_all, items_filename):
    """Keep only the items having at least one tag listed in the given file."""
    with open(items_filename, 'r') as fh:
        wanted = [line.strip() for line in fh.readlines()]
    print_list('Specified tags:', wanted)
    return [item for item in build_data_all if meet_list(item['tags'], wanted)]
def get_keywords_only(build_data_all, items_filename):
    """Keep only items of *build_data_all* having at least one keyword listed
    (one per line) in the text file *items_filename*.
    """
    with open(items_filename, 'r') as fh:
        # NOTE: the local is named 'tags' but holds keywords here.
        tags = [item.strip() for item in fh.readlines()]
    print_list('Specified keywords:', tags)

    def matching(item):
        return meet_list(item['keywords'], tags)

    return list(filter(matching, build_data_all))
def get_keyword_only(build_data_all, keyword):
    """Return the items of *build_data_all* whose 'keywords' list contains
    the given *keyword*."""
    print('Specified keyword: %s' % keyword)
    return [entry for entry in build_data_all if keyword in entry['keywords']]
def get_version():
    """Return the version string from the VERSION file next to this script."""
    with open(os.path.join(get_script_directory(), 'VERSION'), 'r') as f:
        version = f.read().strip()
    return version
def get_upstream_version():
    """Fetch the VERSION file of the upstream project via the GitLab API.

    The API returns the file content base64-encoded inside a JSON payload;
    the callback decodes it and strips the trailing newline.
    """
    url = ('https://gitlab.com/api/v4/projects/%s/repository/files/VERSION'
           % naive_url_encode(upstream_project))

    def lambda_query_content(response):
        # 'content' is base64-encoded file data
        return (base64.b64decode(response.json()['content'])
                .decode('UTF-8').rstrip())

    return get_url(url, None, {"ref": "master"}, lambda_query_content)
def equalize_args(record):
    """{"VAR1": "value1", "VAR2": "value2"} → ['VAR1=value1', 'VAR2=value2']"""
    return ['%s=%s' % (key, value) for key, value in record.items()]
def first_shortest_tag(list_tags):
    """Return the shortest tag; ties are broken lexicographically."""
    return min(list_tags, key=lambda tag: (len(tag), tag))
def indent_script(list_after_deploy, indent_level, start=False):
    """Join script lines with newline + *indent_level* spaces.

    When *start* is True the first line is indented as well; an empty list
    yields the empty string.
    """
    check_list(list_after_deploy)
    if not list_after_deploy:
        return ""
    pad = " " * indent_level
    body = ('\n' + pad).join(list_after_deploy)
    return pad + body if start else body
def escape_single_quotes(script):
    """Escape single quotes so *script* can sit in a single-quoted shell string."""
    return "'\\''".join(script.split("'"))
def generate_config(docker_repo):
    """Return the GitLab CI YAML that deploys every chosen image to *docker_repo*.

    Reads the chosen build matrix (read_build_data_chosen) and emits one
    'deploy_<id>_<tag>' job per image.  When nothing needs rebuilding a
    minimal no-op pipeline is returned so CI still succeeds.
    """
    data = read_build_data_chosen()
    if not data:
        # Nothing to rebuild: single no-op job.
        return """---
# GitLab CI config automatically generated by docker-keeper; do not edit.
# yamllint disable rule:line-length rule:empty-lines

stages:
  - build

noop:
  stage: build
  image: alpine:latest
  variables:
    GIT_STRATEGY: none
  script:
    - echo "No image to rebuild."
  only:
    - master
"""
    # Pipeline skeleton; {var_jobs} receives the per-image jobs built below.
    yamlstr_init = """---
# GitLab CI config automatically generated by docker-keeper; do not edit.
# yamllint disable rule:line-length rule:empty-lines

stages:
  - deploy
  - remove

# Changes below (or jobs extending .docker-deploy) should be carefully
# reviewed to avoid leaks of HUB_TOKEN
.docker-deploy:
  stage: deploy
  only:
    - master
  variables:
    HUB_REPO: "{var_hub_repo}"
    # HUB_USER: # protected variable
    # HUB_TOKEN: # protected variable
    # FOO_TOKEN: # other, user-defined tokens for after_deploy_script
  image: docker:latest
  services:
    - docker:dind
  before_script:
    - cat /proc/cpuinfo /proc/meminfo
    - echo $0
    - apk add --no-cache bash
    - /usr/bin/env bash --version
    - apk add --no-cache curl
    - curl --version
    - pwd

{var_jobs}"""
    yamlstr_jobs = ''
    job_id = 0
    for item in data:
        job_id += 1
        # Each job: login, build under a temporary local tag, push every
        # real tag, logout, then run the optional after-deploy script.
        yamlstr_jobs += """
deploy_{var_job_id}_{var_some_real_tag}:
  extends: .docker-deploy
  script: |
    /usr/bin/env bash -e -c '
      echo $0
      . "{var_keeper_subtree}/gitlab_functions.sh"
      dk_login
      dk_build "{var_context}" "{var_dockerfile}" "{var_one_tag}" {vars_args}
      dk_push "{var_hub_repo}" "{var_one_tag}" {vars_tags}
      dk_logout
      {var_after_deploy}' bash
""".format(var_context=item['context'],
           var_dockerfile=item['dockerfile'],
           vars_args=('"%s"' % '" "'.join(equalize_args(item['args']))),
           vars_tags=('"%s"' % '" "'.join(item['tags'])),
           var_keeper_subtree=get_script_directory(),
           var_hub_repo=docker_repo,
           var_one_tag=("image_%d" % job_id),
           var_job_id=job_id,
           var_some_real_tag=first_shortest_tag(item['tags']),
           var_after_deploy=escape_single_quotes(
               indent_script(item['after_deploy_script'], 6)))
    return yamlstr_init.format(var_hub_repo=docker_repo,
                               var_jobs=yamlstr_jobs)
def usage():
    """Print the markdown-formatted command-line documentation of keeper.py."""
    print("""# docker-keeper

This python script is devised to help maintain Docker Hub repositories
of stable and dev (nightly build) Docker images from a YAML-specified,
single-branch GitLab repository - typically created as a fork of the
following repo: <https://gitlab.com/erikmd/docker-keeper-template>.

This script is meant to be run by GitLab CI.

## Syntax

```
keeper.py write-artifacts [OPTION]
    Generate artifacts in the '%s' directory.
    This requires having file '%s' in the current working directory.
    OPTION can be:
        --minimal (default option, can be omitted)
        --nightly (same as --minimal + nightly-build images)
        --rebuild-all (rebuild all images)
        --rebuild-files FILE (rebuild images with Dockerfile mentioned in FILE)
        --rebuild-tags FILE (rebuild images with tag mentioned in FILE)
        --rebuild-keywords FILE (rebuild images with keyword mentioned in FILE)
        --rebuild-keyword KEYWORD (rebuild images with specified keyword)

keeper.py generate-config
    Print a GitLab CI YAML config to standard output.
    This requires files:
    - generated/build_data_chosen.json
    - generated/remote_tags_to_rm.json

keeper.py --version
    Print the script version.

keeper.py --upstream-version
    Print the upstream version from %s

keeper.py --help
    Print this documentation.
```

## Usage

* Fork <https://gitlab.com/erikmd/docker-keeper-template>.

* Follow the instructions of the README.md in your fork."""
          % (output_directory, images_filename, upstream_url))
def _check_one_arg(args, option):
    """Exit with usage help unless *option* received exactly one argument.

    *args* is the full CLI argument list: ['write-artifacts', option, value].
    """
    if len(args) != 3:
        print_stderr("Error: "
                     "%s expects one argument exactly."
                     "\nWas: %s" % (option, args))
        usage()
        exit(1)


def main(args):
    """Command-line entry point; dispatch on *args* (see usage()).

    Supported modes: --version, --upstream-version, generate-config,
    --help (or no argument), and 'write-artifacts' with an optional
    rebuild-selection option.  Exits with status 1 on bad arguments.
    """
    if args == ['--version']:
        print(get_version())
        exit(0)
    elif args == ['--upstream-version']:
        print(get_upstream_version())
    elif args == ['generate-config']:
        spec = load_spec()  # could be avoided by writing yet another .json…
        print(generate_config(spec['docker_repo']))
    elif args == ['--help'] or args == []:
        usage()
    elif args[0] == 'write-artifacts':
        spec = load_spec()
        build_data_all = get_list_dict_dockerfile_matrix_tags_args(spec)
        all_tags = get_check_tags(build_data_all)
        remote_tags = get_remote_tags(spec)
        # Minimal set = images whose tags are missing remotely.
        build_data_min = minimal_rebuild(build_data_all, remote_tags)
        remote_tags_to_rm = to_rm(all_tags, remote_tags)
        if args[1:] == [] or args[1:] == ['--minimal']:
            write_build_data_chosen(build_data_min)
        elif args[1:] == ['--rebuild-all']:
            write_build_data_chosen(build_data_all)
        elif args[1:] == ['--nightly']:
            nightly_only = get_nightly_only(spec)
            build_data_nightly = merge_data(build_data_min, nightly_only)
            write_build_data_chosen(build_data_nightly)
        elif args[1] == '--rebuild-files':
            _check_one_arg(args, '--rebuild-files')
            rebuild_files_only = get_files_only(build_data_all, args[2])
            build_data_files = merge_data(build_data_min, rebuild_files_only)
            write_build_data_chosen(build_data_files)
        elif args[1] == '--rebuild-tags':
            # BUG FIX: the error message previously said '--rebuild-files'.
            _check_one_arg(args, '--rebuild-tags')
            rebuild_tags_only = get_tags_only(build_data_all, args[2])
            build_data_tags = merge_data(build_data_min, rebuild_tags_only)
            write_build_data_chosen(build_data_tags)
        elif args[1] == '--rebuild-keywords':
            _check_one_arg(args, '--rebuild-keywords')
            rebuild_keywords_only = get_keywords_only(build_data_all, args[2])
            build_data_tags = merge_data(build_data_min, rebuild_keywords_only)
            write_build_data_chosen(build_data_tags)
        elif args[1] == '--rebuild-keyword':
            _check_one_arg(args, '--rebuild-keyword')
            rebuild_keywords_only = get_keyword_only(build_data_all, args[2])
            build_data_tags = merge_data(build_data_min, rebuild_keywords_only)
            write_build_data_chosen(build_data_tags)
        else:
            print_stderr("Error: wrong arguments.\nWas: %s" % args)
            usage()
            exit(1)
        # Artifacts common to every write-artifacts mode.
        write_build_data_all(build_data_all)
        write_build_data_min(build_data_min)
        write_remote_tags(remote_tags)
        write_remote_tags_to_rm(remote_tags_to_rm)
        write_list_dockerfile(build_data_all)
        write_readme(spec['base_url'], build_data_all)
        write_docker_repo(spec)
    else:
        print_stderr("Error: wrong arguments.\nWas: %s" % args)
        usage()
        exit(1)
###############################################################################
# Test suite, cf. <https://docs.python-guide.org/writing/tests/>
# $ pip3 install pytest
# $ py.test bash_formatter.py
def test_get_commit():
    """get_commit resolves branch v8.0 of coq/coq to the same pinned SHA on
    both GitHub and GitLab.  NOTE: this test hits the network.
    """
    github = {"fetcher": "github", "repo": "coq/coq", "branch": "v8.0"}
    github_expected = "6aecb9a1fe3f9b027dfd702931298bc61d40b6d3"
    github_actual = get_commit(github)
    assert github_actual == github_expected
    gitlab = {"fetcher": "gitlab", "repo": "coq/coq", "branch": "v8.0"}
    gitlab_expected = "6aecb9a1fe3f9b027dfd702931298bc61d40b6d3"
    gitlab_actual = get_commit(gitlab)
    assert gitlab_actual == gitlab_expected


def shouldfail(lam):
    """Assert that calling the zero-argument callable *lam* raises Error."""
    try:
        res = lam()
        print_stderr("Wrong outcome: '%s'" % res)
        assert False
    except Error:
        print_stderr('OK')
def test_check_trim_relative_path():
    """check_trim_relative_path strips './' prefixes and rejects absolute paths."""
    assert check_trim_relative_path('.') == '.'
    assert check_trim_relative_path('./foo/bar') == 'foo/bar'
    assert check_trim_relative_path('bar/baz') == 'bar/baz'
    shouldfail(lambda: check_trim_relative_path('/etc'))


def test_eval_if():
    """eval_if substitutes {matrix[...]} fields then evaluates the comparison."""
    matrix1 = {"base": "latest", "coq": "dev"}
    matrix2 = {"base": "4.09.0-flambda", "coq": "dev"}
    assert eval_if('{matrix[base]}=="latest"', matrix1)
    assert eval_if('{matrix[base]} == "latest"', matrix1)
    assert eval_if(' "{matrix[base]}" == "latest"', matrix1)
    assert eval_if('{matrix[base]}!="latest"', matrix2)
    assert eval_if('{matrix[base]} != "latest"', matrix2)
    assert eval_if(' "{matrix[base]}" != "latest"', matrix2)
def test_is_unique():
    """is_unique detects duplicates; uniqify removes them."""
    s = [1, 2, 4, 0, 4]
    assert not is_unique(s)
    s = uniqify(s)
    assert is_unique(s)


def test_uniqify():
    """uniqify deduplicates and returns a sorted list."""
    assert uniqify([1, 2, 4, 0, 4]) == [0, 1, 2, 4]
def test_merge_dict():
    """merge_dict: on key clashes, the second dict's value wins."""
    foo = {'a': 1, 'c': 2}
    bar = {'b': 3, 'c': 4}
    foobar = merge_dict(foo, bar)
    assert foobar == {'a': 1, 'b': 3, 'c': 4}


def test_diff_list():
    """diff_list keeps l1's elements (duplicates included) absent from l2."""
    l1 = [1, 2, 4, 2, 5, 4]
    l2 = [3, 1, 2]
    assert diff_list(l1, l2) == [4, 5, 4]
def test_subset_list():
    """subset_list(small, big): every element of small occurs in big."""
    l2 = [2, 3]
    l1 = [2]
    l0 = [3, 4, 5]
    l3 = [2, 3, 5]
    assert subset_list(l2, l3)
    assert not subset_list(l2, l1)
    assert not subset_list(l2, l0)


def test_equalize_args():
    """equalize_args renders a dict as a list of 'KEY=value' strings."""
    assert (equalize_args({"VAR1": "value1", "VAR2": "value2"}) ==
            ['VAR1=value1', 'VAR2=value2'])
def test_merge_data():
    """merge_data appends only l2 items missing from l1 (order preserved)."""
    l1 = [{"i": 1, "s": "a"}, {"i": 2, "s": "b"}, {"i": 1, "s": "a"}]
    l2 = [{"i": 2, "s": "b"}, {"i": 2, "s": "b"}, {"i": 3, "s": "c"}]
    res1 = merge_data(l1, l2)
    assert res1 == [{"i": 1, "s": "a"}, {"i": 2, "s": "b"}, {"i": 1, "s": "a"},
                    {"i": 3, "s": "c"}]
    res2 = merge_data(l2, l1)
    assert res2 == [{"i": 2, "s": "b"}, {"i": 2, "s": "b"}, {"i": 3, "s": "c"},
                    {"i": 1, "s": "a"}, {"i": 1, "s": "a"}]


def test_meet_list():
    """meet_list: True iff the two lists share at least one element."""
    assert not meet_list([1, 2], [])
    assert not meet_list([], [2, 3])
    assert not meet_list([1, 2], [3])
    assert meet_list([1, 2], [2, 3])
def test_first_shortest_tag():
    """Shortest length wins; ties are broken lexicographically."""
    assert first_shortest_tag(['BB', 'AA', 'z', 'y']) == 'y'


def test_indent_script():
    """indent_script indents continuation lines; start=True indents line 1 too."""
    assert indent_script(['echo ok', 'echo "The End"'], 6, True) == \
        '      echo ok\n      echo "The End"'
    assert indent_script(['echo ok', 'echo "The End"'], 6) == \
        'echo ok\n      echo "The End"'
if __name__ == "__main__":
    # Forward the CLI arguments (minus the program name) to main().
    main(sys.argv[1:])
|
# @lc app=leetcode id=326 lang=python3
#
# [326] Power of Three
#
# https://leetcode.com/problems/power-of-three/description/
#
# algorithms
# Easy (42.60%)
# Likes: 340
# Dislikes: 45
# Total Accepted: 372.3K
# Total Submissions: 873.9K
# Testcase Example: '27'
#
# Given an integer n, return true if it is a power of three. Otherwise, return
# false.
#
# An integer n is a power of three, if there exists an integer x such that n ==
# 3^x.
#
#
# Example 1:
# Input: n = 27
# Output: true
# Example 2:
# Input: n = 0
# Output: false
# Example 3:
# Input: n = 9
# Output: true
# Example 4:
# Input: n = 45
# Output: false
#
#
# Constraints:
#
#
# -2^31 <= n <= 2^31 - 1
#
#
#
# Follow up: Could you solve it without loops/recursion?
#
# @lc tags=math
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# 判断一个整数,是否是3的幂。
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
    """LeetCode 326 — decide whether an integer is a power of three."""

    def isPowerOfThree(self, n: int) -> bool:
        """Return True iff n == 3**x for some integer x >= 0."""
        if n <= 0:
            return False
        # Strip every factor of 3; exactly 1 remains iff n was a pure power.
        while n % 3 == 0:
            n //= 3
        return n == 1
# @lc code=end
# @lc main=start
if __name__ == '__main__':
    # Replay the four examples from the problem statement, printing each
    # input, the expected answer, and the solver's actual output.
    examples = [(27, 'true'), (0, 'false'), (9, 'true'), (45, 'false')]
    for index, (number, expected) in enumerate(examples, start=1):
        print('Example %d:' % index)
        print('Input : ')
        print('n = %d' % number)
        print('Exception :')
        print(expected)
        print('Output :')
        print(str(Solution().isPowerOfThree(number)))
        print()
# @lc main=end |
from ..machine import Machine
class GenericVideo(object):
    """Video model whose transitions are attached later via set_machine().

    The trigger methods (play/pause/stop) are empty stubs: the Machine is
    expected to drive the actual state changes.
    """
    # State names, shared with the transition tables.
    PAUSED = "paused"
    PLAYING = "playing"
    STOPPED = "stopped"

    def __init__(self, skip_optional_validation=True):
        # Forwarded to Machine in set_machine(); semantics defined by Machine.
        self.skip_optional_validation = skip_optional_validation

    def set_machine(self, transitions, initial_state):
        # Attach a Machine built from the caller-supplied transition table.
        self.machine = Machine(self, transitions, initial_state, skip_optional_validation=self.skip_optional_validation)

    def play(self):
        ...

    def pause(self):
        ...

    def stop(self):
        ...
class BadVideo(object):
    """Video model with a hard-coded play/pause/stop transition table.

    Presumably 'bad' because only the play() trigger method is defined here;
    pause()/stop() are added by the GoodVideo subclass below.
    """
    PAUSED = "paused"
    PLAYING = "playing"
    STOPPED = "stopped"

    def __init__(self, skip_optional_validation=True, add_is_state=False):
        # Full transition table; the machine starts in STOPPED.
        transitions = [
            {"trigger": "play", "source": self.PAUSED, "dest": self.PLAYING},
            {"trigger": "play", "source": self.STOPPED, "dest": self.PLAYING},
            {"trigger": "pause", "source": self.PLAYING, "dest": self.PAUSED},
            {"trigger": "stop", "source": self.PLAYING, "dest": self.STOPPED},
            {"trigger": "stop", "source": self.PAUSED, "dest": self.STOPPED},
        ]
        self.machine = Machine(self, transitions, self.STOPPED, skip_optional_validation, add_is_state)

    @property
    def state(self):
        # Delegate to the underlying Machine's current state.
        return self.machine.state

    def play(self):
        ...
class GoodVideo(BadVideo):
    """BadVideo completed with the pause() and stop() trigger methods."""

    def pause(self):
        ...

    def stop(self):
        ...
class ExtraGoodVideo(GoodVideo):
    """GoodVideo with an extra method not referenced by any transition."""

    def extra(self):
        ...
|
# FROMS
from models import Player
from graph import get_graph_from_edges, draw_graph, get_full_cycles_from_graph,\
full_cycle_to_edges, get_one_full_cycle, convert_full_cycle_to_graph,\
get_one_full_cycle_from_graph, get_hamiltonian_path_from_graph,\
is_there_definitely_no_hamiltonian_cycle, hamilton
import datetime
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(
filename=f'logs/{datetime.datetime.utcnow().strftime("%Y-%m-%d-%H-%M-%S")}.log',
filemode='w',
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
import networkx as nx
import time
import random
# from random import shuffle
from random import randint
# Constants
GENDER_MALE = "male"
GENDER_FEMALE = "female"
GENDER_NONBINARY = "non-binary"
GENDER_NOPREF = "no preference"

# Whether intermediate matching graphs are drawn via draw_graph().
DISPLAY_GRAPH = True
# Proportion minimum of total player count in accepted csv before 2 csvs will
# be outputted (1st accepted players list, 2nd rejected players list).
MINIMUM_MATCHED_PLAYERS_BEFORE_CSVOUTPUT = 0.8
# Each RELAX_* value is the probability that the corresponding pairing
# constraint is waived for a given candidate edge (see
# is_there_edge_between_players).
RELAX_GENDERPREF_REQUIREMENT_PERCENTAGE = 0.35
RELAX_NO_SAME_HOUSE_REQUIREMENT_PERCENTAGE = 0.35
# Changing this value changes how much we care about the houses of players
# being the same.  If 1 - we don't care, and house de-conflicting is ignored.
# 0 means we won't allow any players of the same house to be matched.
RELAX_NO_SAME_CG_REQUIREMENT_PERCENTAGE = 0.00
# RELAX_NO_SAME_FACULTY_REQUIREMENT_PERCENTAGE = 0.00 #not used
def get_house_from_player(player):
    """Return player.housenumber, raising ValueError when it is blank."""
    house = player.housenumber
    if house == "":
        raise ValueError('House number provided ' + house +
                         ' for player ' + str(player.username) + ' is invalid!')
    return house
def get_cg_from_player(player):
    """Return the player's CG number, or a random stand-in when it is blank."""
    if player.cgnumber != "":
        return player.cgnumber
    # Nursing has no CGs, thus we do not want to conflict with Medicine CGs 1-50
    return str(randint(60, 8888))
def is_gender_pref_respected(player_being_checked, other_player):
    """True when other_player's gender satisfies the checked player's preference.

    A preference of GENDER_NOPREF is always satisfied.
    """
    pref = player_being_checked.genderpref
    if pref == GENDER_NOPREF:
        return True
    return pref == other_player.genderplayer
def are_gender_prefs_respected(angel_player, mortal_player):
    """True when the gender-preference check passes in both directions."""
    forward = is_gender_pref_respected(angel_player, mortal_player)
    backward = is_gender_pref_respected(mortal_player, angel_player)
    return forward and backward
def is_there_edge_between_players(angel_player, mortal_player):
    '''
    Checks if two players are valid as an angel-mortal pair i.e. an "edge"
    exists between them. E.g. If we are enforcing a heterogenous gender mix for these
    players - check their gender preferences and return False (no edge)
    between them

    NOTE: this check is randomised — each RELAX_* constant is the
    probability that the corresponding constraint is waived for this
    particular pair, so repeated calls can disagree.
    '''
    print (f"Checking {angel_player} and {mortal_player}")

    #Check if gender choice is respected
    random_relax_genderpref_requirement = random.random() < RELAX_GENDERPREF_REQUIREMENT_PERCENTAGE
    if random_relax_genderpref_requirement:
        gender_pref_is_respected = True
    else:
        gender_pref_is_respected = are_gender_prefs_respected(
            angel_player, mortal_player)

    # # Check house and faculty are not the same
    '''
    no same faculty requirement is not used
    '''
    # random_relax_fac_requirement = random.random() < RELAX_SAME_FACULTY_REQUIREMENT_PERCENTAGE
    # if random_relax_fac_requirement:
    #     players_are_from_same_faculty = False
    # else:
    #     players_are_from_same_faculty = angel_player.faculty == mortal_player.faculty

    # Relax no same house requirement
    random_relax_house_requirement = random.random() < RELAX_NO_SAME_HOUSE_REQUIREMENT_PERCENTAGE
    if random_relax_house_requirement:
        players_are_from_same_house = False
    else:
        players_are_from_same_house = get_house_from_player(
            angel_player) == get_house_from_player(mortal_player)

    # Relax no same CG requirement
    random_relax_cg_requirement = random.random() < RELAX_NO_SAME_CG_REQUIREMENT_PERCENTAGE
    if random_relax_cg_requirement:
        players_are_from_same_cg = False
    else:
        players_are_from_same_cg = get_cg_from_player(
            angel_player) == get_cg_from_player(mortal_player)

    valid_pairing = gender_pref_is_respected and (not players_are_from_same_house) and (not players_are_from_same_cg)#and (not players_are_from_same_faculty) # Remove same-house reqr --> #or players_are_from_same_house) and

    # Diagnostics explaining why the pair was rejected.
    # if players_are_from_same_faculty:
    #     print (f"players from same fac\n")
    #ignore this requirement
    if not gender_pref_is_respected:
        print (f"gender pref not respected")
    if players_are_from_same_house:
        print (f"players from same house\n")
    if players_are_from_same_cg:
        print(f"players from same CG\n")
    print (f"\n")
    return valid_pairing
def get_player_edges_from_player_list(player_list):
    """Build the list of directed (angel, mortal) edges between player pairs.

    Every ordered pair of distinct players is tested with
    is_there_edge_between_players; rejected pairs are logged.  O(n^2)
    comparisons by construction.
    """
    player_edges = []
    # iterate through all players in list - compare each player to all others
    for player in player_list:
        for other_player in player_list:
            if other_player != player:
                if is_there_edge_between_players(player, other_player):
                    player_edges.append((player, other_player))
                else:
                    logger.info(f"{player} and {other_player} have conflicts")  # to keep track who was rejected
    return player_edges
def angel_mortal_arrange(player_list):
    '''
    Depending on the gender preferences to follow, run the edge-finding
    algorithm, generate a graph and find a Hamiltonian circuit.

    Returns a list of player chains: one full cycle per strongly connected
    component for which hamilton() found a cycle and the component holds at
    least MINIMUM_MATCHED_PLAYERS_BEFORE_CSVOUTPUT of all players.
    '''
    print (f"Arranging player list: {player_list}")

    # Convert the list of players into a list of valid edges
    player_edges = get_player_edges_from_player_list(player_list)

    # Generate the overall graph from all edges
    overall_graph = get_graph_from_edges(player_edges)
    print (f"Number of nodes in overall graph: {overall_graph.number_of_nodes()}")

    # Find all connected components and find cycles for all
    graphs = list(overall_graph.subgraph(c) for c in
                  nx.strongly_connected_components(overall_graph))  ##.strongly_connected_component_subgraphs(overall_graph) is deprecated in version 2.4 https://stackoverflow.com/questions/61154740/attributeerror-module-networkx-has-no-attribute-connected-component-subgraph
    print (f"\nConnected components detected: {len(graphs)}")
    print (f"Printing original player list: ")
    for player in player_list:
        print (f"{player}")
    print (f"Original player list size: {len(player_list)}")
    print (f"\n\n")
    list_of_player_chains = []
    # for G in graphs:
    #     draw_graph(G)
    for G in graphs:
        print (f"Printing players in current graph:")
        for graph_player in G.nodes():
            print (f"{graph_player}")
        # Draw this intermediate graph
        print (f"Number of nodes in graph: {G.number_of_nodes()}")
        if DISPLAY_GRAPH:
            draw_graph(G)
        # Find out if there is DEFINITELY no hamiltonian cycle
        # NOTE(review): the callee's name says "definitely no cycle" but the
        # local is named is_there_full_cycle — the naming looks inverted;
        # confirm against graph.py.
        is_there_full_cycle = is_there_definitely_no_hamiltonian_cycle(G)
        print (f"Is there DEFINITELY no full cycle? - {is_there_full_cycle}")
        # Sleep for a few seconds
        time.sleep(2)
        '''
        # Output all cycles that encompass all nodes (valid pairings)
        full_cycles = get_full_cycles_from_graph(G)

        # Pick any full cycle to draw, or draw nothing if there are no full cycles
        full_cycle = get_one_full_cycle(full_cycles)
        '''
        full_cycle = hamilton(G)  #get_one_full_cycle_from_graph(G)
        #full_cycle = get_hamiltonian_path_from_graph(G)

        # Draw the full cycle if it exists
        if full_cycle is not None and (G.number_of_nodes() >= (MINIMUM_MATCHED_PLAYERS_BEFORE_CSVOUTPUT * len(player_list))):  #do not print CSV if number of nodes is < 80% of participants
            G_with_full_cycle = convert_full_cycle_to_graph(full_cycle)
            draw_graph(G_with_full_cycle)
            list_of_player_chains.append(full_cycle)
            # find out which nodes were missing
            players_not_in_csv = set(player_list)-set(list(G.nodes()))
            logger.info(f"CSV has been printed. However, the following players {players_not_in_csv} are not inside. Please match them manually.")
            print(f"Found a full cycle! CSV is printed. However, the following players {players_not_in_csv} are not inside. Please match them manually.")
        else:
            print (f"There is no full cycle - sorry! This means that the current set of players cannot form a perfect chain given the arrange requirements. No CSV printed.")
            logger.info(f"CSV not printed - no full cycle found")
    return list_of_player_chains
|
#!/usr/bin/python
import DeviceAtlasCloud.Client
import json
import argparse
# Query DeviceAtlas Cloud for the device properties of a user agent given
# on the command line and dump the result as JSON on stdout.
da = DeviceAtlasCloud.Client.Client()

parser = argparse.ArgumentParser(description='Device Atlas Cloud Probe')
parser.add_argument('-ua', action='store', type=str, help='User Agent string', dest='useragent')
args = parser.parse_args()

headers = {'user_agent': args.useragent}
data = da.getDeviceData(headers)
# BUG FIX: the bare Python-2 ``print`` statement was a SyntaxError under
# Python 3; print() + json.dumps is equivalent and works on both versions.
print(json.dumps(data))
|
# Copyright 2021 Photon
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from glob import glob
from setuptools import setup
# ROS2 package name; reused below for paths and resource registration.
package_name = 'py_pubsub'

setup(
    name=package_name,
    version='0.1.0',
    packages=[package_name],
    data_files=[
        # Register the package in the ament resource index.
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
        # Include all launchfile(s)
        (os.path.join('share', package_name), glob('launch/*.launch.py')),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='Photon',
    maintainer_email='1487quantum@users.noreply.github.com',
    description='ROS2 Pub-Sub example, Python',
    license='Apache License 2.0',
    tests_require=['pytest'],
    entry_points={
        # Executables exposed to `ros2 run py_pubsub {pub,sub}`.
        'console_scripts': [
            'pub = py_pubsub.publisher_member_function:main',
            'sub = py_pubsub.subscriber_member_function:main',
        ],
    },
)
|
#!/usr/bin/env python
"Neon using Gaussians"
import unittest, sciunittest
from PyQuante.Ints import getbasis,getints
from PyQuante.hartree_fock import rhf
from PyQuante.Molecule import Molecule
# GAMESS-UK HF Energy
# Energy -128.4744065199
energy = -128.474406 # Changed 2003-04-07 to reflect diis
name = "Ne"
def main():
    """Build a neon atom at the origin and return its RHF energy (hartree)."""
    ne = Molecule('Ne',atomlist = [(10,(0,0,0))])
    en,orbe,orbs = rhf(ne)
    return en
class NeTest(sciunittest.TestCase):
    """Regression test pinning the Ne RHF energy to the reference value."""

    def runTest(self):
        """Energy of Ne (using Gaussians) close to -128.474406?"""
        result = main()
        # assertInside: |result - energy| must be within the 1e-4 tolerance.
        self.assertInside(result, energy, 1e-4)
def suite():
    """Return a TestSuite containing the single NeTest case."""
    return unittest.TestLoader().loadTestsFromTestCase(NeTest)
if __name__ == '__main__':
    # Verbose runner so each test's docstring is shown while running.
    unittest.TextTestRunner(verbosity=2).run(suite())
|
#
# growler/indexer/middleware.py
#
from os import (path, listdir)
# Page skeleton: {head} and {body} are filled in per request.
HTML_TMPL_STR = """
<!DOCTYPE html>
<html>
<head>{head}</head>
<body>{body}</body>
</html>
"""

HEAD_TMPL_STR = """
<meta charset=utf8>
<title>{title}</title>
"""

# Directory-listing body: {file_list} is a string of <li> items.
BODY_TMPL_STR = """
<h1>Index of {path}</h1>
<ul>{file_list}</ul>
"""

PATH_NOT_FOUND_ERR_BODY_TMPL_STR = """
<h1>404 - Not Found</h1>
<p>The requested URL '{path}' was not found on this server.</p>
"""

SERVER_ERROR_TMPL_STR = """
<h1>{error}</h1>
<p>{msg}</p>
"""
class Indexer():
    """
    The main middleware class which does the file searching and the html
    rendering.
    """

    def __init__(self, dir, prefix='/'):
        """
        Construct an Indexer middleware object, provided a directory to call
        and an optional http prefix.

        @param dir str: The path to the directory to serve.
        @param prefix str: The http prefix
        """
        # NOTE(review): 'dir' shadows the builtin; kept for API compatibility.
        print("[Indexer::__init__] dir=%s, prefix=%s" % (dir, prefix))
        self.path = dir
        self.prefix = prefix
        # Resolve '~' and relative components once, at construction time.
        self.abs = path.abspath(path.expanduser(dir))
        print("    hosting", self.abs)

    def __call__(self, req, res):
        """
        The middleware function for incoming requests. The path is split and
        explored.
        """
        code = 200
        # Map the request path onto the hosted directory.
        tpath = path.join(self.abs, *req.path[1:].split('/'))
        try:
            filenames = listdir(tpath)
        except FileNotFoundError:
            head = HEAD_TMPL_STR.format(title="404 Not Found")
            # NOTE(review): this renders self.abs (the server-side root),
            # not the requested URL — exposes the filesystem layout and
            # mislabels the path; confirm whether req.path was intended.
            body = PATH_NOT_FOUND_ERR_BODY_TMPL_STR.format(path=self.abs)
            code = 404
        except NotADirectoryError:
            # Target is a regular file: stream it instead of an index page.
            res.send_file(tpath)
            return
        except Exception as e:
            # Any other OS error becomes a 500 page (self.abs exposure: see
            # the NOTE above).
            msg = "path '%s' returned the error '%s'" % (self.abs, e)
            head = HEAD_TMPL_STR.format(title="ERROR")
            body = SERVER_ERROR_TMPL_STR.format(error='500 - Server Error',
                                                msg=msg)
            code = 500
        else:
            # Build absolute links from the Host header plus the request path.
            hostname = (req.headers['HOST'], req.path)
            filename_frmt = "<a href='http://%s%s/{0}'>{0}</a>" % hostname
            filelinks = [filename_frmt.format(f) for f in filenames]
            file_list = "<li>%s</li>" % ("</li><li>".join(filelinks))
            title = "Index of %s" % (self.path)
            head = HEAD_TMPL_STR.format(title=title)
            body = BODY_TMPL_STR.format(path=self.abs, file_list=file_list)
        res.send_html(HTML_TMPL_STR.format(head=head, body=body), code)
|
#!/usr/bin/env python3
import unittest
import torch
from Lgpytorch.kernels import SpectralMixtureKernel
class TestSpectralMixtureKernel(unittest.TestCase):
    """Tests for SpectralMixtureKernel: active_dims handling, batch shapes,
    diagonal evaluation and __getitem__ slicing.

    The pattern throughout: evaluate one kernel, copy its raw mixture
    parameters into a reference kernel, and assert the relative Frobenius
    error between the two covariance matrices is below 1e-4.
    """

    def create_kernel(self, num_dims, **kwargs):
        # All tests use 5 mixtures with one lengthscale per dimension (ARD).
        return SpectralMixtureKernel(num_mixtures=5, ard_num_dims=num_dims, **kwargs)

    def create_data_no_batch(self):
        # 50 points, 10 features.
        return torch.randn(50, 10)

    def create_data_single_batch(self):
        # batch of 2, 50 points, 2 features.
        return torch.randn(2, 50, 2)

    def create_data_double_batch(self):
        # batch shape (3, 2), 50 points, 2 features.
        return torch.randn(3, 2, 50, 2)

    def test_active_dims_list(self):
        """Kernel with an active_dims list == plain kernel on sliced columns."""
        x = self.create_data_no_batch()
        kernel = self.create_kernel(num_dims=4, active_dims=[0, 2, 4, 6])
        y = torch.randn_like(x[..., 0])
        kernel.initialize_from_data(x, y)
        kernel.initialize_from_data_empspect(x, y)
        covar_mat = kernel(x).evaluate_kernel().evaluate()
        # Mirror the initialized raw parameters into a no-active_dims kernel.
        kernel_basic = self.create_kernel(num_dims=4)
        kernel_basic.raw_mixture_weights.data = kernel.raw_mixture_weights
        kernel_basic.raw_mixture_means.data = kernel.raw_mixture_means
        kernel_basic.raw_mixture_scales.data = kernel.raw_mixture_scales
        covar_mat_actual = kernel_basic(x[:, [0, 2, 4, 6]]).evaluate_kernel().evaluate()
        self.assertLess(torch.norm(covar_mat - covar_mat_actual) / covar_mat_actual.norm(), 1e-4)

    def test_active_dims_range(self):
        """Same as above, with active_dims given as a contiguous range."""
        active_dims = list(range(3, 9))
        x = self.create_data_no_batch()
        kernel = self.create_kernel(num_dims=6, active_dims=active_dims)
        y = torch.randn_like(x[..., 0])
        kernel.initialize_from_data(x, y)
        kernel.initialize_from_data_empspect(x, y)
        covar_mat = kernel(x).evaluate_kernel().evaluate()
        kernel_basic = self.create_kernel(num_dims=6)
        kernel_basic.raw_mixture_weights.data = kernel.raw_mixture_weights
        kernel_basic.raw_mixture_means.data = kernel.raw_mixture_means
        kernel_basic.raw_mixture_scales.data = kernel.raw_mixture_scales
        covar_mat_actual = kernel_basic(x[:, active_dims]).evaluate_kernel().evaluate()
        self.assertLess(torch.norm(covar_mat - covar_mat_actual) / covar_mat_actual.norm(), 1e-4)

    def test_no_batch_kernel_single_batch_x(self):
        """An unbatched kernel broadcasts over a batched input."""
        x = self.create_data_single_batch()
        kernel = self.create_kernel(num_dims=x.size(-1))
        y = torch.randn_like(x[..., 0])
        kernel.initialize_from_data(x, y)
        kernel.initialize_from_data_empspect(x, y)
        batch_covar_mat = kernel(x).evaluate_kernel().evaluate()
        # Reference: evaluate each batch element separately and stack.
        actual_mat_1 = kernel(x[0]).evaluate_kernel().evaluate()
        actual_mat_2 = kernel(x[1]).evaluate_kernel().evaluate()
        actual_covar_mat = torch.cat([actual_mat_1.unsqueeze(0), actual_mat_2.unsqueeze(0)])
        self.assertLess(torch.norm(batch_covar_mat - actual_covar_mat) / actual_covar_mat.norm(), 1e-4)

        # Test diagonal
        kernel_diag = kernel(x, diag=True)
        actual_diag = actual_covar_mat.diagonal(dim1=-1, dim2=-2)
        self.assertLess(torch.norm(kernel_diag - actual_diag) / actual_diag.norm(), 1e-4)

    def test_single_batch_kernel_single_batch_x(self):
        """A kernel with empty batch_shape matches per-element evaluation."""
        x = self.create_data_single_batch()
        kernel = self.create_kernel(num_dims=x.size(-1), batch_shape=torch.Size([]))
        y = torch.randn_like(x[..., 0])
        kernel.initialize_from_data(x, y)
        kernel.initialize_from_data_empspect(x, y)
        batch_covar_mat = kernel(x).evaluate_kernel().evaluate()
        actual_mat_1 = kernel(x[0]).evaluate_kernel().evaluate()
        actual_mat_2 = kernel(x[1]).evaluate_kernel().evaluate()
        actual_covar_mat = torch.cat([actual_mat_1.unsqueeze(0), actual_mat_2.unsqueeze(0)])
        self.assertLess(torch.norm(batch_covar_mat - actual_covar_mat) / actual_covar_mat.norm(), 1e-4)

        # Test diagonal
        kernel_diag = kernel(x, diag=True)
        actual_diag = actual_covar_mat.diagonal(dim1=-1, dim2=-2)
        self.assertLess(torch.norm(kernel_diag - actual_diag) / actual_diag.norm(), 1e-4)

    def test_smoke_double_batch_kernel_double_batch_x(self):
        """Smoke test only: double-batch evaluation runs without error."""
        x = self.create_data_double_batch()
        kernel = self.create_kernel(num_dims=x.size(-1), batch_shape=torch.Size([3, 2]))
        y = torch.randn_like(x[..., 0])
        kernel.initialize_from_data(x, y)
        kernel.initialize_from_data_empspect(x, y)
        batch_covar_mat = kernel(x).evaluate_kernel().evaluate()
        kernel_diag = kernel(x, diag=True)
        return batch_covar_mat, kernel_diag

    def test_kernel_getitem_single_batch(self):
        """kernel[0] equals the first batch-kernel applied to the first batch."""
        x = self.create_data_single_batch()
        kernel = self.create_kernel(num_dims=x.size(-1), batch_shape=torch.Size([2]))
        res1 = kernel(x).evaluate()[0]  # Result of first kernel on first batch of data
        new_kernel = kernel[0]
        res2 = new_kernel(x[0]).evaluate()  # Should also be result of first kernel on first batch of data.
        self.assertLess(torch.norm(res1 - res2) / res1.norm(), 1e-4)

        # Test diagonal
        kernel_diag = kernel(x, diag=True)
        actual_diag = res1.diagonal(dim1=-1, dim2=-2)
        self.assertLess(torch.norm(kernel_diag - actual_diag) / actual_diag.norm(), 1e-4)

    def test_kernel_getitem_double_batch(self):
        """kernel[0, 1] equals the (0, 1) batch-kernel on the (0, 1) batch."""
        x = self.create_data_double_batch()
        kernel = self.create_kernel(num_dims=x.size(-1), batch_shape=torch.Size([3, 2]))
        res1 = kernel(x).evaluate()[0, 1]  # Result of first kernel on first batch of data
        new_kernel = kernel[0, 1]
        res2 = new_kernel(x[0, 1]).evaluate()  # Should also be result of first kernel on first batch of data.
        self.assertLess(torch.norm(res1 - res2) / res1.norm(), 1e-4)

        # Test diagonal
        kernel_diag = kernel(x, diag=True)
        actual_diag = res1.diagonal(dim1=-1, dim2=-2)
        self.assertLess(torch.norm(kernel_diag - actual_diag) / actual_diag.norm(), 1e-4)
if __name__ == "__main__":
unittest.main()
|
#entrada
# For each input line, print 'Y' when every word starts with the same letter
# as the first character of the line (case-insensitive), else 'N'.
# A line starting with '*' terminates the program.
# input
while True:
    frase = str(input())
    if frase[0] == '*':
        break
    # processing
    letra = frase[0].upper()
    frase = frase.split()
    palavras = len(frase)
    flag = 'Y'
    # compare the first letter of every remaining word against letra
    for i in range(1, palavras):
        if frase[i][0].upper() != letra:
            flag = 'N'
            break
    # output
    print(flag)
|
import gzip
import mmap
import pickle
import re
import signal
from requests import codes as status_code_values
from athena import SharedFragmentTree, tokenise_and_join_with_spaces
def extract_common_fragments_per_lui_worker(
    queue, lui, filepath, entry_points, exclusions
):
    """Worker process: tokenise one LUI's pages and enqueue their common fragments.

    Reads gzip-compressed pickled "ping" records from *filepath* at the given
    byte offsets (*entry_points*), tokenises the content of each page that was
    fetched successfully (HTTP 200), and puts ``(lui, fragments)`` on *queue*,
    where *fragments* is the combination of all common token fragments
    (an empty list when no usable pings were found).
    """
    # Workers must not react to Ctrl-C themselves; the parent handles SIGINT.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    tokens = []
    with open(filepath, "r+b") as raw:
        # NOTE: the mmap deliberately shadows the file-handle name `raw`.
        with mmap.mmap(raw.fileno(), 0) as raw:
            for entry_point in entry_points:
                # Each entry point is the start offset of one gzip member.
                raw.seek(entry_point)
                with gzip.GzipFile(fileobj=raw, mode="rb") as file:
                    ping = pickle.load(file)
                content = ping["content"]
                # Effective status is that of the final redirect, if any.
                http = (
                    ping["redirects"][-1]["status"]
                    if len(ping["redirects"]) > 0
                    else None
                )
                if content is not None and http == status_code_values.ok:
                    tokens.append(
                        tokenise_and_join_with_spaces(content, exclusions).split(
                            " "
                        )
                    )
    if len(tokens) > 0:
        tree = SharedFragmentTree(tokens)
        fragments = tree.extract_combination_of_all_common_fragments()
    else:
        fragments = []
    # Block until the result is handed to the parent process.
    queue.put((lui, fragments), True)
|
class ClassificationModel(object):
    """Stub classifier returning fixed predictions; placeholder for a real model."""

    def __init__(self, modeltype):
        # The requested model type is ignored by this placeholder implementation.
        pass

    def predict_part_no(self, image):
        """Return a hard-coded (part_number, confidence) pair; *image* is unused."""
        return "3020a", 0.8

    def predict_color_id(self, image):
        """Return a hard-coded (color_id, confidence) pair; *image* is unused."""
        return 3, 0.9
|
from werkzeug.security import generate_password_hash, check_password_hash
from accessors.user_accessor import UserAccessor
from redis_processor.message_processor import MessageProcessor, Message
import logging
from helpers.image_server import ImageServer
import os
class UserException(Exception):
    """Domain-specific error raised by user-resource operations."""
    pass
class UserResource(object):
    """Service layer for user documents: passwords, roles, gallery, messages, uploads.

    Delegates persistence to UserAccessor, message construction to
    MessageProcessor, and image serving to ImageServer.
    """

    def __init__(self):
        self.accessor = UserAccessor()       # user document storage
        self.processor = MessageProcessor()  # builds Message objects from form data
        self.logger = logging.getLogger(__name__)

    @staticmethod
    def set_password(password):
        """Return a salted hash of *password* suitable for storage."""
        psw_hash = generate_password_hash(password)
        return psw_hash

    @staticmethod
    def check_password(psw_hash, password):
        """Return True when *password* matches the stored *psw_hash*."""
        return check_password_hash(psw_hash, password)

    def get_roles(self, username):
        """Return a cursor over the user's roles (the _id field is excluded)."""
        query = {'username': username}
        projection = {'roles': 1, '_id': 0}
        return self.accessor.collection.find(query, projection)

    def get_gallery(self, username, skip=0, limit=None, sort=None):
        """Return a paginated slice of the user's gallery list."""
        return self.accessor.get_paginated_list(username, 'gallery', skip, limit, sort)

    def get_gallery_item(self, username, gallery_id):
        """Return a single gallery entry by its id."""
        return self.accessor.get_list_item(username, 'gallery', gallery_id)

    def insert_gallery_item(self, username, gallery_item):
        """Append *gallery_item* to the user's gallery and return it unchanged."""
        self.accessor.insert_list_item(username, 'gallery', gallery_item)
        return gallery_item

    def create_user(self, username, email, password):
        """Create a new user document via the accessor."""
        return self.accessor.create_user(username, email, password)

    def insert_message(self, username, message):
        """Store an already-built message document for the user."""
        self.accessor.insert_message(username, message)

    def post_message(self, username, form_content):
        """Build a message from form fields and store it."""
        message = self.processor.create_message(username, **form_content)
        self.accessor.insert_message(username, message)

    def get_pending_message(self, username):
        """Serve a 300px thumbnail of the user's most recent message image.

        Returns a (message, 404) pair when no message is pending.
        """
        message = self.accessor.get_last_message(username)
        if not message:
            return message, 404
        message = Message.load_from_document(message)
        path = message.current
        resp = ImageServer().serve_thumbnail_from_path(path, 300)
        return resp

    def get_pending_message_json(self, username):
        """Return the most recent message as JSON, or (message, 404) if none."""
        message = self.accessor.get_last_message(username)
        if not message:
            return message, 404
        message = Message.load_from_document(message)
        return message.jsonify()

    def get_uploads(self, username):
        """Return the user's full uploads list."""
        return self.accessor.get_list(username, 'uploads')

    def get_upload(self, username, file_id):
        """Return the single uploads entry matching *file_id*, if any."""
        return self.accessor.get_array_element({'username': username}, 'uploads', {'uploads.file_id': file_id})

    def delete_uploads_item(self, username, upload):
        """Delete an upload given its document (dispatches on its file_id)."""
        file_id = upload.get('file_id')
        return self.delete_upload_by_id(username, file_id)

    def delete_upload_by_id(self, username, file_id):
        """Remove an upload and its stored files; returns a (payload, status) pair."""
        if not file_id:
            return {'message': 'No file_id provided'}, 400
        upload = self.get_upload(username, file_id)
        if upload:
            self._remove_files(upload)
            self.accessor.delete_one_element({'username': username}, 'uploads', upload)
            return {'message': f'Successfully deleted {file_id} for {username}'}, 200
        return {'message': f'No file to Remove'}, 200

    def _remove_files(self, upload_item):
        """Delete the upload's on-disk image plus its stored file and thumbnail."""
        file_path = upload_item.get('img_path', '')
        if os.path.exists(file_path):
            os.remove(file_path)
        self.accessor.fs_accessor.delete(upload_item.get('file_id'))
        self.accessor.fs_accessor.delete(upload_item.get('thumbnail_id'))
"""
python
from resources.user_resource import UserResource
foo = UserResource()
foo.get_upload('rjvanvoorhis', '9ab159cb-474f-4be2-963e-3e20da872886')
""" |
from src.TeXtable import tex_table
if __name__ == '__main__':
    # Render the sample CSV in each of the four supported table styles.
    # (Replaces four copy-pasted calls with a loop; output is identical.)
    for style in range(1, 5):
        print(tex_table(style=style, csv_text='./test/test.csv'))
#! python3
# __author__ = "YangJiaHao"
# date: 2018/2/23
class Solution:
    def groupAnagrams(self, strs):
        """Group words that are anagrams of each other.

        :type strs: List[str]
        :rtype: List[List[str]]

        Words are keyed by their sorted character tuple, so anagrams land in
        the same bucket.  Order of first appearance is preserved (dicts keep
        insertion order in Python 3.7+).
        """
        groups = {}
        for word in strs:
            # Compute the canonical key once per word; the original code
            # sorted each word up to three times.
            key = tuple(sorted(word))
            groups.setdefault(key, []).append(word)
        return list(groups.values())
if __name__ == '__main__':
    # Quick manual check: print the anagram groups for a small word list.
    solver = Solution()
    groups = solver.groupAnagrams(["eat", "tea", "tan", "ate", "nat", "bat"])
    print(groups)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Example on how to use the 'Pendulum' OpenAI Gym environments in PRL using the `stable_baselines` library.
"""
from stable_baselines.common.policies import MlpPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
from pyrobolearn.envs import gym # this is a thin wrapper around the gym library
# create env, state, and action from gym
env = gym.make('Pendulum-v0')
state, action = env.state, env.action
print("State and action space: {} and {}".format(state.space, action.space))
# The algorithms require a vectorized environment to run.
# NOTE(review): the lambda closes over `env` by name; it is called inside
# DummyVecEnv's constructor, i.e. before `env` is rebound on this line, so it
# still yields the gym environment -- confirm DummyVecEnv keeps that timing.
env = DummyVecEnv([lambda: env])
model = PPO2(MlpPolicy, env, verbose=1)
# Train PPO2 for a fixed budget of environment steps.
model.learn(total_timesteps=10000)
# Roll out the learned policy and render each frame.
obs = env.reset()
for i in range(1000):
    action, _states = model.predict(obs)
    obs, rewards, dones, info = env.step(action)
    env.render()
|
'''
Retrieval Network Testing in Discrete World, Written by Xiao
For robot localization in a dynamic environment.
'''
# Import params and similarity from lib module
import torch
import argparse, os, copy, pickle, time
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from progress.bar import Bar
from torchvision import transforms
from torch.utils.data import DataLoader
from termcolor import colored
from lib.scene_graph_generation import Scene_Graph
from Network.navigation_network.params import *
from Network.navigation_network.datasets import NaviDataset
from Network.navigation_network.networks import NavigationNet
from Network.navigation_network.losses import Cross_Entropy_Loss
from Network.navigation_network.trainer import Training
from Network.retrieval_network.trainer import plot_training_statistics
from discrete_RNet_pipeline import show_testing_histogram, show_testing_histogram_comparison
from os.path import dirname, abspath
# ------------------------------------------------------------------------------
# -------------------------------Testing Pipeline-------------------------------
# ------------------------------------------------------------------------------
def testing_pipeline(Dataset, Network, LossFcn, checkpoints_prefix, is_only_image_branch=False, benchmark=None):
    """Evaluate a trained navigation network and save per-floorplan accuracy.

    Loads the test split, restores the best checkpoint from
    ``checkpoints_prefix + 'best_fit.pkl'``, runs one pass over the test set,
    tallies total/correct counts per floor plan, and writes the statistics to
    ``checkpoints_prefix + 'testing_statistics.npy'``.
    """
    # ---------------------------Loading testing dataset---------------------------
    print('----'*20 + '\n' + colored('Network Info: ','blue') + 'Loading testing dataset...')
    test_dataset = Dataset(DATA_DIR, is_test=True, load_only_image_data=is_only_image_branch)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, num_workers=NUM_WORKERS)
    # ------------------------------Initialize model--------------------------------
    print('----'*20 + '\n' + colored('Network Info: ','blue') + 'Initialize model...')
    model = Network(only_image_branch=is_only_image_branch, benchmarkName=benchmark)
    model.load_state_dict(torch.load(checkpoints_prefix + 'best_fit.pkl'))
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Model testing on: ", device)
    print("Cuda is_available: ", torch.cuda.is_available())
    print('----'*20)
    model.to(device)
    model.eval()
    # Inference only: disable autograd bookkeeping globally.
    torch.set_grad_enabled(False)
    # ------------------------------Testing Main------------------------------------
    testing_statistics = {}
    bar = Bar('Processing', max=len(test_loader))
    for batch_idx, (inputs, targets) in enumerate(test_loader):
        inputs = tuple(input.to(device) for input in inputs)
        targets = targets.to(device)
        outputs = model(*inputs)
        _, is_correct = LossFcn(outputs, targets, batch_average_loss=True)
        # Floor-plan id comes from the trajectory path (5th path component).
        # NOTE(review): depends on the dataset's directory layout -- confirm.
        iFloorPlan = test_dataset.trajectories[batch_idx][0].split('/')[4]
        if iFloorPlan in testing_statistics:
            testing_statistics[iFloorPlan]['total'] += 1
            testing_statistics[iFloorPlan]['corrects'] += is_correct.item()
        else:
            testing_statistics.update({iFloorPlan:dict(total=1, corrects=is_correct.item())})
        bar.next()
    bar.finish()
    print('----'*20)
    np.save(checkpoints_prefix + 'testing_statistics.npy', testing_statistics)
# ------------------------------------------------------------------------------
# -------------------------------Training Pipeline------------------------------
# ------------------------------------------------------------------------------
def training_pipeline(Dataset, Network, LossFcn, Training, checkpoints_prefix, is_only_image_branch=False, benchmark=None):
    """Train a navigation network and return the best-fit model.

    Loads the train/val splits, builds the model, trains with SGD + StepLR
    for NUM_EPOCHS (checkpointing under *checkpoints_prefix*), and returns the
    model selected by the Training routine.
    """
    dataset_sizes = {}
    # ---------------------------Loading training dataset---------------------------
    print('----'*20 + '\n' + colored('Network Info: ','blue') + 'Loading training dataset...')
    train_dataset = Dataset(DATA_DIR, is_train=True, load_only_image_data=is_only_image_branch)
    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE[benchmark], shuffle=True, num_workers=NUM_WORKERS)
    dataset_sizes.update({'train': len(train_dataset)})
    # --------------------------Loading validation dataset--------------------------
    print('----'*20 + '\n' + colored('Network Info: ','blue') + 'Loading validation dataset...')
    val_dataset = Dataset(DATA_DIR, is_val=True, load_only_image_data=is_only_image_branch)
    val_loader = DataLoader(val_dataset, batch_size=BATCH_SIZE[benchmark], shuffle=True, num_workers=NUM_WORKERS)
    dataset_sizes.update({'val': len(val_dataset)})
    # ------------------------------Initialize model--------------------------------
    print('----'*20 + '\n' + colored('Network Info: ','blue') + 'Initialize model...')
    model = Network(only_image_branch=is_only_image_branch, benchmarkName=benchmark)
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("Model training on: ", device)
    print("Cuda is_available: ", torch.cuda.is_available())
    model.to(device)
    # Uncomment to see the summary of the model structure
    # summary(model, input_size=[(3, IMAGE_SIZE, IMAGE_SIZE), (3, IMAGE_SIZE, IMAGE_SIZE)])
    # ----------------------------Set Training Critera------------------------------
    print('----'*20 + '\n' + colored('Network Info: ','blue') + 'Set Training Critera...')
    # Define loss function
    loss_fcn = LossFcn
    # Observe that all parameters are being optimized
    optimizer = torch.optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=MOMENTUM)
    # optimizer = torch.optim.Adam(model.parameters())
    # Decay LR by a factor of GAMMA every STEP_SIZE epochs
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=STEP_SIZE, gamma=GAMMA)
    # --------------------------------Training--------------------------------------
    print('----'*20 + '\n' + colored('Network Info: ','blue') + 'Training with dataset size --> ', dataset_sizes)
    data_loaders = {'train': train_loader, 'val': val_loader}
    model_best_fit = Training(data_loaders, dataset_sizes, model, loss_fcn, optimizer, lr_scheduler, num_epochs=NUM_EPOCHS, checkpoints_prefix=checkpoints_prefix, batch_size=BATCH_SIZE[benchmark])
    # ------------------------------------------------------------------------------
    print('----'*20 + '\n' + colored('Network Info: ','blue') + 'Done... Best Fit Model Saved')
    print('----'*20)
    return model_best_fit
# ------------------------------------------------------------------------------
if __name__ == '__main__':
    # --------------------------------------------------------------------------
    # Get arguments from the command line
    parser = argparse.ArgumentParser()
    parser.add_argument("--train", help="train network", action="store_true")
    parser.add_argument("--test", help="test network", action="store_true")
    parser.add_argument("--heatmap", help="test network", action="store_true")
    parser.add_argument("--benchmark", help="network image branch", action="store_true")
    parser.add_argument("--name", type=str, default='none', help="benchmark network name: vgg16, resnet50, resnext50_32x4d, googlenet")
    parser.add_argument("--rnet", help="entire network", action="store_true")
    args = parser.parse_args()
    torch.cuda.empty_cache()
    show_testing_histogram_comparison(parent_dir=CHECKPOINTS_DIR,filename='testing_statistics.npy')
    # --------------------------------------------------------------------------
    # Train corresponding networks
    if args.train:
        Dataset = NaviDataset
        Network = NavigationNet
        LossFcn = Cross_Entropy_Loss()
        if args.benchmark and not args.rnet:
            checkpoints_prefix = CHECKPOINTS_DIR + args.name + '/'
        elif args.rnet and not args.benchmark:
            checkpoints_prefix = CHECKPOINTS_DIR + 'rnet/'
            args.name = 'rnet'
        else:
            print('----'*20 + '\n' + colored('Network Error: ','red') + 'Please specify a branch (image/all)')
            # BUG FIX: previously execution fell through after the error print
            # and crashed later with a NameError on undefined `checkpoints_prefix`.
            raise SystemExit(1)
        TraningFcn = Training
        model_best_fit = training_pipeline(Dataset, Network, LossFcn, TraningFcn, checkpoints_prefix, is_only_image_branch=args.benchmark, benchmark=args.name)
        torch.save(model_best_fit.state_dict(), checkpoints_prefix + 'best_fit.pkl')
        plot_training_statistics(parent_dir=CHECKPOINTS_DIR, filename='training_statistics.npy')
    # --------------------------------------------------------------------------
    # Test corresponding networks
    if args.test:
        Dataset = NaviDataset
        Network = NavigationNet
        LossFcn = Cross_Entropy_Loss()
        if args.benchmark and not args.rnet:
            checkpoints_prefix = CHECKPOINTS_DIR + args.name + '/'
        elif args.rnet and not args.benchmark:
            checkpoints_prefix = CHECKPOINTS_DIR + 'rnet/'
            args.name = 'rnet'
        else:
            print('----'*20 + '\n' + colored('Network Error: ','red') + 'Please specify a branch (image/all)')
            # BUG FIX: exit instead of continuing with `checkpoints_prefix` unset.
            raise SystemExit(1)
        testing_pipeline(Dataset, Network, LossFcn, checkpoints_prefix, is_only_image_branch=args.benchmark, benchmark=args.name)
        show_testing_histogram(checkpoints_prefix+'testing_statistics.npy')
        show_testing_histogram_comparison(parent_dir=CHECKPOINTS_DIR,filename='testing_statistics.npy')
|
# https://github.com/abhishekchhibber/Gmail-Api-through-Python/blob/master/gmail_read.py
'''
Reading GMAIL using Python
- Abhishek Chhibber
'''
'''
This script does the following:
- Go to Gmail inbox
- Find and read all the unread messages
- Extract details (Date, Sender, Subject, Snippet, Body) and export them to a .csv file / DB
- Mark the messages as Read - so that they are not read again
'''
'''
Before running this script, the user should get the authentication by following
the link: https://developers.google.com/gmail/api/quickstart/python
Also, client_secret.json should be saved in the same directory as this file
'''
# Importing required libraries
from apiclient import discovery
from apiclient import errors
from httplib2 import Http
from oauth2client import file, client, tools
import base64
#from bs4 import BeautifulSoup
import re
import time
#import dateutil.parser as parser
from datetime import datetime
import datetime
import csv
import pprint
# Creating a storage.JSON file with authentication details
SCOPES = 'https://www.googleapis.com/auth/gmail.modify' # we are using modify and not readonly, as we will be marking the messages Read
store = file.Storage('/d/gm/storage.json')
creds = store.get()
# Run the OAuth flow interactively when no (valid) cached credentials exist.
if not creds or creds.invalid:
    flow = client.flow_from_clientsecrets('/d/gm/client_secret.json', SCOPES)
    creds = tools.run_flow(flow, store)
GMAIL = discovery.build('gmail', 'v1', http=creds.authorize(Http()))
user_id = 'me'
label_id_one = 'INBOX'
label_id_two = 'UNREAD'
# Getting all the unread messages from Inbox
# labelIds can be changed accordingly
unread_msgs = GMAIL.users().messages().list(userId='me',labelIds=[label_id_one, label_id_two]).execute()
# We get a dictionary. Now reading values for the key 'messages'.
# NOTE(review): this raises KeyError if the response has no 'messages' key
# (e.g. zero unread mails) -- confirm against the API's empty-result shape.
pprint.pprint(unread_msgs)
mssg_list = unread_msgs['messages']
print ("Total unread messages in inbox: ", str(len(mssg_list)))
final_list = [ ]
# For each unread message: fetch it, pull headers/snippet/body into a dict,
# then mark the message as read.
for mssg in mssg_list:
    temp_dict = { }
    m_id = mssg['id'] # get id of individual message
    message = GMAIL.users().messages().get(userId=user_id, id=m_id).execute() # fetch the message using API
    payld = message['payload'] # get payload of the message
    headr = payld['headers'] # get header of the payload
    for one in headr: # getting the Subject
        if one['name'] == 'Subject':
            msg_subject = one['value']
            temp_dict['Subject'] = msg_subject
        else:
            pass
    for two in headr: # getting the date
        if two['name'] == 'Date':
            # msg_date = two['value']
            # date_parse = (parser.parse(msg_date))
            # m_date = (date_parse.date())
            temp_dict['Date'] = two['value'] # str(m_date)
        else:
            pass
    for three in headr: # getting the Sender
        if three['name'] == 'From':
            msg_from = three['value']
            temp_dict['Sender'] = msg_from
        else:
            pass
    temp_dict['Snippet'] = message['snippet'] # fetching message snippet
    try:
        # Fetching message body
        mssg_parts = payld['parts'] # fetching the message parts
        part_one = mssg_parts[0] # fetching first element of the part
        part_body = part_one['body'] # fetching body of the message
        part_data = part_body['data'] # fetching data from the body
        # Gmail returns URL-safe base64: map '-'/'_' back to '+'/'/' and decode.
        clean_one = part_data.replace("-","+")
        clean_one = clean_one.replace("_","/")
        clean_two = base64.b64decode (bytes(clean_one, 'UTF-8'))
        ##? soup = BeautifulSoup(clean_two , "lxml" )
        ##? mssg_body = soup.body()
        # mssg_body is a readable form of message body
        # depending on the end user's requirements, it can be further cleaned
        # using regex, beautiful soup, or any other method
        ##? temp_dict['Message_body'] = mssg_body
    # NOTE(review): bare except silently swallows all errors (including typos
    # above); narrowing to (KeyError, IndexError, ValueError) would be safer.
    except :
        pass
    print (temp_dict)
    final_list.append(temp_dict) # This will create a dictionary item in the final list
    # This will mark the message as read
    GMAIL.users().messages().modify(userId=user_id, id=m_id,body={ 'removeLabelIds': ['UNREAD']}).execute()
# BUG FIX: corrected the typos in the user-facing summary line
# ("messaged retrived" -> "messages retrieved").
print("Total messages retrieved: ", str(len(final_list)))
'''
The final_list will have dictionary in the following format:
	{	'Sender': '"email.com" <name@email.com>',
		'Subject': 'Lorem ipsum dolor sit ametLorem ipsum dolor sit amet',
		'Date': 'yyyy-mm-dd',
		'Snippet': 'Lorem ipsum dolor sit amet'
		'Message_body': 'Lorem ipsum dolor sit amet'}
The dictionary can be exported as a .csv or into a databse
'''
# Exporting the collected messages as a CSV file.  Keys missing from a row
# (e.g. 'Message_body', currently disabled above) are written as empty cells.
with open('CSV_NAME.csv', 'w', encoding='utf-8', newline='') as csvfile:
    fieldnames = ['Sender', 'Subject', 'Date', 'Snippet', 'Message_body']
    writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter=',')
    writer.writeheader()
    for val in final_list:
        writer.writerow(val)
|
# python 2
# encoding: utf-8
import os
import sys
import subprocess
from datetime import datetime
from workflow import Workflow, MATCH_SUBSTRING
from workflow.background import run_in_background
import mullvad_actions
import helpers
GITHUB_SLUG = 'atticusmatticus/alfred-mullvad'
#############################
######## SUBROUTINES ########
#############################
def execute(cmdList):
    """ Execute a terminal command from list of arguments
    Arguments:
        cmdList -- command line command (list of strings)
    Returns:
        output of the command: stdout, or stderr if anything was written there
    """
    env = os.environ.copy()
    # Prepend the directory holding the `mullvad` binary to the search path.
    env['PATH'] = '/usr/local/bin:%s' % env['PATH']
    proc = subprocess.Popen(cmdList,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=env)
    out, err = proc.communicate()
    # Any stderr output takes precedence over stdout, mirroring the caller's
    # expectation that errors are surfaced.
    if err:
        return err
    return out
def get_auto_connect():
    """ Get Mullvad Auto-Connect Status
    Returns:
        list of status lines, e.g. ["Autoconnect: on"]
    """
    output = execute(['mullvad', 'auto-connect', 'get'])
    return output.splitlines()
def get_lan():
    """ Get Mullvad Local Network Sharing Status
    Returns:
        list of status lines, e.g. ["Local network sharing setting: allow"]
    """
    output = execute(['mullvad', 'lan', 'get'])
    return output.splitlines()
def get_kill_switch():
    """ Get Mullvad Kill-Switch Status
    Returns:
        list of status lines, e.g.
        ["Network traffic will be blocked when the VPN is disconnected"]
    """
    output = execute(['mullvad', 'always-require-vpn', 'get'])
    return output.splitlines()
def get_version():
    """ Get Mullvad Version
    Returns:
        List -- [mullvad supported:True/False, currentVersion='1234.5', latestVersion='6789.0']

    NOTE(review): the line/column offsets below assume the CLI's multi-line
    "Current version / Is supported / ... / Latest version" output layout --
    confirm against the installed `mullvad version` format.
    """
    mullVersion = execute(['mullvad', 'version'])
    # Second line, third token: "true"/"false" support flag.
    supported = mullVersion.splitlines()[1].split()[2]
    if supported == 'true':
        supported = True
    elif supported == 'false':
        supported = False
    # First line carries the installed version after the colon.
    currentVersion = mullVersion.splitlines()[0].split(':')[1].strip()
    # Fourth line carries the latest released version after the colon.
    latestVersion = mullVersion.splitlines()[3].split(':')[1].strip()
    return [supported, currentVersion, latestVersion]
def connection_status():
    """ Add workflow item of current connection status
    Returns:
        Item -- Connected/Disconnected/Blocked

    NOTE(review): parsing is tied to the exact `mullvad status` output; the
    third whitespace token of the line is taken as the state word.
    """
    for status in get_connection():
        stat = str(status.split()[2])
        if stat == 'Connected':
            countryString, cityString = get_country_city()
            # Offer a disconnect action, showing where we are connected.
            wf.add_item('{} to: {} {}'.format(stat, cityString, countryString).decode('utf8'),
                        subtitle=' '.join(status.split()[4:])+'. Select to Disconnect. Type "relay" to change.',
                        arg='/usr/local/bin/mullvad disconnect',
                        valid=True,
                        icon='icons/mullvad_green.png')
        elif stat == 'Disconnected':
            wf.add_item(stat,
                        subtitle='Select to Connect',
                        arg='/usr/local/bin/mullvad connect',
                        valid=True,
                        icon='icons/mullvad_red.png')
        elif stat == 'Blocked:':
            # Strip the trailing colon from "Blocked:" for display.
            wf.add_item(stat[:-1],
                        subtitle='This device is offline, no tunnels can be established...',
                        arg='/usr/local/bin/mullvad reconnect',
                        valid=True,
                        icon='icons/mullvad_red.png')
def get_country_city():
    """ Get the current country and city relay information
    :returns countryString, cityString: names resolved from the cached relay data
    """
    # TODO: make this work for OpenVPN as well as Wireguard
    getProt = get_protocol()
    # NOTE(review): the offsets below assume a "... city(ccc), CC ..." layout
    # in the `mullvad relay get` output: the comma separates a 3-letter city
    # code from a 2-letter country code -- confirm against the CLI version.
    sep = getProt.index(',')
    countryCodeSearch = '({})'.format(getProt[sep+2:sep+4])
    cityCodeSearch = '({})'.format(getProt[sep-3:sep])
    countries = wf.cached_data('mullvad_country_list',
                               get_country_list,
                               max_age=432000)
    # First country entry containing "(cc)".
    index = [i for i,s in enumerate(countries) if countryCodeSearch in s][0]
    relayList = wf.cached_data('mullvad_relay_list',
                               get_relay_list,
                               max_age=432000)
    # Country name is the entry minus its trailing "(code)" token.
    countryString = countries[index].split()[:-1][0]
    # Matching city entry within that country, minus its trailing "(code)".
    cityString = ' '.join([city[0] for city in relayList[index][1:] if cityCodeSearch in city[0]][0].split()[:-1])
    return countryString, cityString
def get_connection():
    """ VPN connection tunnel status
    :returns: the `mullvad status` output, split into lines
    :type returns: list of strings
    """
    status_output = execute(['mullvad', 'status'])
    return status_output.splitlines()
def check_connection():
    """Add a workflow item linking to Mullvad's online connection-check page."""
    wf.add_item(
        'check',
        subtitle='Check security of connection',
        arg='open https://mullvad.net/en/check/',
        valid=True,
        icon='icons/mullvad_yellow.png')
def set_kill_switch():
    """Add a workflow item showing the kill-switch state with a toggle action."""
    for status in get_kill_switch():
        if status == 'Network traffic will be blocked when the VPN is disconnected':
            killStat = ['Enabled', 'off', 'green']
        elif status == 'Network traffic will be allowed when the VPN is disconnected':
            killStat = ['Disabled', 'on', 'red']
        else:
            # BUG FIX: an unrecognised status line (e.g. changed CLI wording)
            # previously left `killStat` unbound and raised NameError below.
            continue
        wf.add_item('Kill switch: ' + killStat[0],
                    subtitle=status + '. Select to switch',
                    arg='/usr/local/bin/mullvad always-require-vpn set {}'.format(killStat[1]),
                    valid=True,
                    icon='icons/skull_{}.png'.format(killStat[2]))
def get_protocol():
    """Return the raw `mullvad relay get` output (current relay constraints)."""
    relay_info = execute(['mullvad', 'relay', 'get'])
    return relay_info
def protocol_status():
    """Add a workflow item showing the current tunnel protocol."""
    # First word after the colon in the relay constraints is the protocol name.
    proto = get_protocol().split(':')[1].split()[0]
    wf.add_item('Tunnel-protocol: {}'.format(proto),
                subtitle='Change tunnel-protocol',
                autocomplete='protocol',
                valid=False,
                icon='icons/{}.png'.format(proto.lower()))
def set_protocol(query):
    """List the tunnel protocols matching *query*, each with a switch action."""
    for proto in filter_tunnel_protocols(query):
        wf.add_item(proto,
                    subtitle='Change protocol to {}'.format(proto),
                    arg='/usr/local/bin/mullvad relay set tunnel-protocol {}'.format(proto.lower()),
                    valid=True,
                    icon='icons/{}.png'.format(proto.lower()))
def filter_tunnel_protocols(query):
    """Return the tunnel-protocol choices, filtered by the query's second word.

    With a bare one-word query, all protocols are returned unfiltered.
    """
    protocols = ['Wireguard', 'OpenVPN', 'Any']
    words = query.split()
    if len(words) > 1:
        # Fuzzy-match the user's term against the protocol names.
        return wf.filter(words[1], protocols, match_on=MATCH_SUBSTRING)
    return protocols
def set_auto_connect():
    """Add one workflow item per auto-connect status line."""
    for line in get_auto_connect():
        wf.add_item(line,
                    'Current auto-connect status.',
                    arg='/usr/local/bin/mullvad auto-connect get',
                    valid=True,
                    icon='icons/chevron-right-dark.png')
def set_lan():
    """Add a workflow item showing the LAN-sharing state with a toggle action."""
    for status in get_lan():
        if status == 'Local network sharing setting: allow':
            lanStat = ['Allowed', 'block', 'green']
        elif status == 'Local network sharing setting: block':
            lanStat = ['Blocked', 'allow', 'red']
        else:
            # BUG FIX: an unrecognised status line (e.g. changed CLI wording)
            # previously left `lanStat` unbound and raised NameError below.
            continue
        wf.add_item('LAN: {}'.format(lanStat[0]),
                    subtitle=status + '. Select to switch',
                    arg='/usr/local/bin/mullvad lan set {}'.format(lanStat[1]),
                    valid=True,
                    icon='icons/lan_{}.png'.format(lanStat[2]))  # TODO: two-monitor icons, red/green
def set_reconnect():
    """Offer a reconnect action, showing the current tunnel status as subtitle."""
    for line in get_connection():
        wf.add_item('Reconnect',
                    subtitle=line,
                    arg='/usr/local/bin/mullvad reconnect',
                    valid=True,
                    icon='icons/chevron-right-dark.png')  # TODO: recycle-loop arrow icon
def unsupported_mullvad():
    """Warn that the installed Mullvad app version is no longer supported."""
    wf.add_item(
        'Mullvad app is not supported',
        subtitle='The currently installed version of this app is not supported',
        arg='open https://mullvad.net/en/help/tag/mullvad-app/',
        valid=True,
        icon='icons/chevron-right-dark.png')  # TODO: warning-style icon
def update_mullvad():
    """Offer to download and open the latest Mullvad .pkg installer."""
    # TODO: Download with something that ships with macOS rather than brewed `wget` in /usr/local/bin/
    latestVersion = wf.cached_data('mullvad_version', data_func=get_version, max_age=86400)[2]
    wf.add_item('Update mullvad',
                subtitle='The currently installed version of Mullvad is out-of-date',
                # Download the versioned installer to ~/Downloads, then open it.
                arg='/usr/local/bin/wget https://github.com/mullvad/mullvadvpn-app/releases/download/{}/MullvadVPN-{}.pkg -P ~/Downloads/ ; wait && open ~/Downloads/MullvadVPN-{}.pkg'.format(latestVersion,latestVersion,latestVersion),
                valid=True,
                icon='icons/cloud-download-dark.png')
def get_account():
    """Return [account_number, days_until_expiry] from `mullvad account get`.

    NOTE(review): assumes line 0 holds the account number as its third token
    and line 1 holds the expiry date (YYYY-MM-DD) as its fourth token --
    confirm against the installed CLI's output format.
    """
    getAcct = execute(['mullvad', 'account', 'get']).splitlines()
    # Days remaining: expiry date minus "now" (naive UTC datetimes).
    deltaDays = (datetime.strptime(getAcct[1].split()[3], '%Y-%m-%d') - datetime.utcnow()).days
    return [getAcct[0].split()[2], deltaDays]
def add_time_account():
    """Show the account number and days-to-expiry; selecting opens the account page."""
    acct = wf.cached_data('mullvad_account',
                          get_account,
                          max_age=86400)
    wf.add_item('Account: {} expires in: {} days'.format(acct[0], acct[1]),
                subtitle='Open mullvad account website and copy account number to clipboard',
                # Copy the account number, then open the Mullvad account login page.
                arg='echo {} | pbcopy && open https://mullvad.net/en/account/'.format(acct[0]),
                valid=True,
                icon='icons/browser.png')
#TODO delete cache
def update_relay_list():
    """Force the Mullvad CLI to refresh its relay list."""
    # TODO add this to its own subroutine that gets run in the background
    execute(['mullvad', 'relay', 'update'])
def list_relay_countries(wf, query):
    """ List countries with servers
    Arguments:
        query -- "relay" optionally followed by a filter term
    """
    # TODO: does `query` need to be here?
    for country in filter_relay_countries(wf, query):
        # Entries look like "Name (cc)"; split out the name and the code.
        countryName = country.split(' (')[0]
        countryCode = country.split('(')[1].split(')')[0]
        wf.add_item(country,
                    subtitle='List cities in {}'.format(countryName),
                    valid=False, # TABing and RETURN have the same effect, take you to city selection
                    autocomplete='country:{} '.format(countryCode),
                    icon='icons/chevron-right-dark.png')
def filter_relay_countries(wf, query):
    """ List countries based on fuzzy match of query
    Returns:
        List of countries as strings (all countries when no filter term given)
    """
    countries = wf.cached_data('mullvad_country_list',
                               get_country_list,
                               max_age=432000)
    words = query.split()
    if len(words) > 1:
        # Fuzzy-match the user's term against the country names.
        return wf.filter(words[1], countries, match_on=MATCH_SUBSTRING)
    return countries
def get_country_list():
    """Return the decoded country names from the cached relay list."""
    relay_entries = wf.cached_data('mullvad_relay_list',
                                   get_relay_list,
                                   max_age=432000)
    # First element of each entry is the country line.
    return [entry[0].decode('utf8') for entry in relay_entries]
def get_relay_list():
    """Parse `mullvad relay list` into a nested per-country structure.

    Output shape: relayList[i] == [country_line, [city, server_tokens...], ...].
    Relies on the CLI's indentation: countries flush left, cities one tab,
    servers two tabs.
    """
    i = -1
    relayList = []
    for line in execute(['mullvad', 'relay', 'list']).splitlines():
        if line.strip(): # if the line is not empty
            if line[0] != '\t': # country
                i += 1
                j = 0
                relayList.append([line])
            elif line[0] == '\t' and line[1] != '\t': # city
                j += 1
                # Keep only the city name/code; drop the "@ ..." details.
                relayList[i].append([line.split("@")[0].strip()])
            elif line[:2] == '\t\t': # server
                relayList[i][j].append(line.split())
    return relayList
def list_relay_cities(wf, query):
    """ List cities of country
    Argument:
        query -- country:`countryCode` where `countryCode` is a two letter abbreviation of a country from list_relay_countries()
    Returns:
        List of Items of cities
    """
    # Query format is "country:<cc> [filter words]".
    countryCode = query.split(':')[1].split()[0]
    for city in filter_relay_cities(wf, countryCode, query):
        # City entries look like "Name (ccc)"; extract the code for the CLI arg.
        cityCode = city.split('(')[1].split(')')[0]
        wf.add_item(city,
                    subtitle='Connect to servers in this city',
                    arg='/usr/local/bin/mullvad relay set location {} {}'.format(countryCode,cityCode),
                    valid=True,
                    icon='icons/chevron-right-dark.png') #TODO maybe add red locks for servers that arent being currently used and green lock for the server that is connected to currently
def get_city_list(wf, countryCode):
    """Build the list of city names for *countryCode* and cache it.

    Returns the list of (decoded) city strings; also stores it under the
    'mullvad_cities_list' cache key for other consumers.
    """
    relayList = wf.cached_data('mullvad_relay_list',
                               get_relay_list,
                               max_age=432000)
    countries = wf.cached_data('mullvad_country_list',
                               get_country_list,
                               max_age=432000)
    countryCodeSearch = '({})'.format(countryCode)
    # Locate the country's entry by its "(cc)" code marker.
    index = [i for i, s in enumerate(countries) if countryCodeSearch in s][0]
    cities = []
    for city in relayList[index][1:]:
        cities.append(city[0].decode('utf8'))
    wf.cache_data('mullvad_cities_list', cities)
    # BUG FIX: previously returned None; callers had to re-read the cache.
    return cities

def filter_relay_cities(wf, countryCode, query):
    """Return the country's cities, filtered by the query's second word if any."""
    # BUG FIX: the original passed get_city_list(...)'s *result* (None) as
    # cached_data's data_func and only worked via the side-effect cache write
    # (max_age=1); use the freshly built list directly instead.
    cities = get_city_list(wf, countryCode)
    queryFilter = query.split()
    if len(queryFilter) > 1:
        return wf.filter(queryFilter[1], cities, match_on=MATCH_SUBSTRING)
    return cities
#############################
######## MAIN ########
#############################
def main(wf):
    """Workflow entry point: route the Alfred query to the matching Mullvad action."""
    # TODO: update workflow option
    if wf.update_available:
        wf.add_item('An update is available!',
                    autocomplete='workflow:update',
                    valid=False,
                    icon='icons/cloud-download-dark.png')
    # extract query: first CLI argument if present, else None
    query = wf.args[0] if len(wf.args) else None
    if not query: # starting screen of information.
        # Offer an app update when installed != latest version.
        if wf.cached_data('mullvad_version',
                          get_version,
                          max_age=86400)[1] != wf.cached_data('mullvad_version',
                                                              get_version,
                                                              max_age=86400)[2]:
            update_mullvad()
        # Warn when the installed app version is unsupported.
        if wf.cached_data('mullvad_version',
                          get_version,
                          max_age=86400)[0] == False:
            unsupported_mullvad()
        # Warn when the account expires within 5 days.
        if wf.cached_data('mullvad_account',
                          get_account,
                          max_age = 86400)[1] <= 5:
            add_time_account()
        connection_status()
        set_kill_switch()
        protocol_status()
        set_lan()
        check_connection()
        set_auto_connect()
        # Append a curated subset of the static actions to the start screen.
        for action in mullvad_actions.ACTIONS:
            if action['name'] in ['relay', 'reconnect', 'account']:
                wf.add_item(action['name'], action['description'],
                            uid=action['name'],
                            autocomplete=action['autocomplete'],
                            arg=action['arg'],
                            valid=action['valid'],
                            icon=action['icon'])
    # Dispatch on the query prefix.
    if query and query.startswith('check'):
        check_connection()
    elif query and any(query.startswith(x) for x in ['kill-switch', 'block-when-disconnected']):
        set_kill_switch()
    elif query and query.startswith('relay'):
        list_relay_countries(wf, query)
    elif query and query.startswith('country:'):
        list_relay_cities(wf, query)
    elif query and query.startswith('lan'):
        set_lan()
    elif query and query.startswith('auto-connect'):
        set_auto_connect()
    elif query and query.startswith('reconnect'):
        set_reconnect()
    elif query and query.startswith('protocol'):
        set_protocol(query)
    elif query and query.startswith('account'):
        add_time_account()
    # NOTE(review): 'protocol' is already consumed by the branch above, so
    # only queries starting with 'tunnel' can reach this arm.
    elif query and any(query.startswith(x) for x in ['tunnel', 'protocol']):
        protocol_status()
    elif query:
        #TODO change from actions dictionary to a filter function
        actions = mullvad_actions.ACTIONS
        # filter actions by query
        if query:
            actions = wf.filter(query, actions,
                                key=helpers.search_key_for_action,
                                match_on=MATCH_SUBSTRING)
        if len(actions) > 0:
            for action in actions:
                wf.add_item(action['name'], action['description'],
                            uid=action['name'],
                            autocomplete=action['autocomplete'],
                            arg=action['arg'],
                            valid=action['valid'],
                            icon=action['icon'])
        else:
            wf.add_item('No action found for "%s"' % query,
                        autocomplete='',
                        icon='icons/info-dark.png')
    # Fallback when nothing matched at all.
    if len(wf._items) == 0:
        query_name = query[query.find(' ') + 1:]
        wf.add_item('No formula found for "%s"' % query_name,
                    autocomplete='%s ' % query[:query.find(' ')],
                    icon='icons/chevron-right-dark.png')
    wf.send_feedback()
    # refresh cache in a background process so the UI stays responsive
    cmd = ['/usr/bin/python', wf.workflowfile('mullvad_refresh.py')]
    run_in_background('mullvad_refresh', cmd)
    # run_in_background('cache_account', cache_account)
#############################
######## CALL MAIN ########
#############################
if __name__ == '__main__':
    # GITHUB_SLUG drives the workflow's self-update check (see the
    # wf.update_available branch in main()).
    wf = Workflow(update_settings={'github_slug': GITHUB_SLUG})
    sys.exit(wf.run(main))
|
# -*- coding: utf-8 -*-
"""
Simple server
Takes i-beacon read data with POST method
Return summary data with GET
MIT License
Copyright (c) 2017 Roman Mindlin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from flask import Flask, request, jsonify, render_template
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.orm import aliased
from flask_migrate import Migrate
import os
from dateutil import parser
import sys
# Application and database setup.
basedir = os.path.abspath(os.path.dirname(__file__))
# Python 2 only: force UTF-8 as the default string encoding.
if sys.version_info < (3, 0):
    reload(sys)
    sys.setdefaultencoding("utf-8")
app = Flask(__name__)
# SQLite database file stored next to this module.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'data.sqlite3')
# NOTE(review): COMMIT_ON_TEARDOWN is deprecated in Flask-SQLAlchemy;
# several write handlers below rely on it to persist their sessions.
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db)
"""
****************** MODELS ******************
"""
class Beacon(db.Model):
    "I-beacon data model"
    # One raw sighting reported by a Raspberry Pi agent: which beacon
    # was seen, when it entered/left range, and the closest approach.
    __tablename__ = 'beacons'
    id = db.Column(db.Integer, primary_key=True)
    raspi_serial = db.Column(db.String(14), index=True)   # reporting agent
    ibeacon_uuid = db.Column(db.String(32))               # beacon identity
    ibeacon_major = db.Column(db.Integer, index=True)
    ibeacon_minor = db.Column(db.Integer, index=True)
    in_time = db.Column(db.DateTime, index=True)          # first seen
    out_time = db.Column(db.DateTime, index=True)         # last seen
    min_dist = db.Column(db.Integer)                      # closest distance observed
    min_time = db.Column(db.DateTime)                     # when min_dist occurred
    @property
    def serialize(self):
        "Return object data in easily serializeable format"
        # NOTE(review): assumes the datetime columns are non-NULL;
        # isoformat() would raise AttributeError on None — confirm
        # against the POST handler, which always parses all three.
        return {
            'id': self.id,
            'raspi_serial': self.raspi_serial,
            'ibeacon_uuid': self.ibeacon_uuid,
            'ibeacon_major': self.ibeacon_major,
            'ibeacon_minor': self.ibeacon_minor,
            'in_time': self.in_time.isoformat(),
            'out_time': self.out_time.isoformat(),
            'min_dist': self.min_dist,
            'min_time': self.min_time.isoformat()
        }
class Gate(db.Model):
    "I-Beacon agents pairs"
    # A gate is a pair of Raspberry Pi agents standing `distance` apart;
    # passing between them produces overlapping Beacon sightings.
    __tablename__ = 'gates'
    id = db.Column(db.Integer, primary_key=True)
    raspi_serial_left = db.Column(db.String(14), index=True)
    raspi_serial_right = db.Column(db.String(14), index=True)
    distance = db.Column(db.Integer)  # gap between the two agents
    @property
    def serialize(self):
        """Return object data in easily serializeable format"""
        return {
            'id': self.id,
            'raspi_serial_left': self.raspi_serial_left,
            'raspi_serial_right': self.raspi_serial_right,
            'distance': self.distance
        }
class Event(db.Model):
    "Gate passing through event"
    # Derived record: a beacon crossing a Gate, produced by
    # process_overlapps() from two overlapping Beacon sightings.
    __tablename__ = 'events'
    id = db.Column(db.Integer, primary_key=True)
    gate_id = db.Column(db.Integer, index=True)
    ibeacon_uuid = db.Column(db.String(32))
    ibeacon_major = db.Column(db.Integer)
    ibeacon_minor = db.Column(db.Integer)
    in_time = db.Column(db.DateTime, index=True)
    out_time = db.Column(db.DateTime, index=True)
    min_time_left = db.Column(db.DateTime)   # closest approach to the left agent
    min_time_right = db.Column(db.DateTime)  # closest approach to the right agent
    # which side of the gate the beacon passed on
    course = db.Column(db.Enum('left', 'center', 'right', 'wide'))
    @property
    def serialize(self):
        "Return object data in easily serializeable format"
        # NOTE(review): assumes all four datetime columns are non-NULL —
        # isoformat() would raise AttributeError otherwise.
        return {
            'id': self.id,
            'gate_id': self.gate_id,
            'ibeacon_uuid': self.ibeacon_uuid,
            'ibeacon_major': self.ibeacon_major,
            'ibeacon_minor': self.ibeacon_minor,
            'in_time': self.in_time.isoformat(),
            'out_time': self.out_time.isoformat(),
            'min_time_left': self.min_time_left.isoformat(),
            'min_time_right': self.min_time_right.isoformat(),
            'course': self.course
        }
"""
****************** MESSAGES ******************
"""
@app.route('/api/messages/', methods=['GET'])
def get_messages():
    """Return every stored Beacon record as a JSON list."""
    beacons = Beacon.query.all()
    payload = [beacon.serialize for beacon in beacons]
    return jsonify(payload), 200
@app.route('/api/messages/', methods=['POST'])
def add_message():
    """Create a new Beacon record from the JSON request body.

    Returns 200 on success, 400 for a missing/unparsable payload or an
    exact duplicate of an already stored record.
    """
    content = request.get_json(silent=True, force=False)
    if not content:
        return "<h1>Error</h1>", 400
    try:
        new_message = Beacon(raspi_serial=content.get('raspi_serial'),
                             ibeacon_uuid=content.get('ibeacon_uuid'),
                             ibeacon_major=content.get('ibeacon_major'),
                             ibeacon_minor=content.get('ibeacon_minor'),
                             in_time=parser.parse(content.get('in_time')),
                             out_time=parser.parse(content.get('out_time')),
                             min_dist=int(float(content.get('min_dist'))),
                             min_time=parser.parse(content.get('min_time')))
    except (TypeError, ValueError):
        # missing or unparsable field — the original crashed with a 500
        return "<h1>Error</h1>", 400
    # reject an exact duplicate of an existing record
    duplicate = db.session.query(Beacon.id).filter(
        (Beacon.raspi_serial == new_message.raspi_serial) &
        (Beacon.ibeacon_uuid == new_message.ibeacon_uuid) &
        (Beacon.ibeacon_major == new_message.ibeacon_major) &
        (Beacon.ibeacon_minor == new_message.ibeacon_minor) &
        (Beacon.in_time == new_message.in_time) &
        (Beacon.out_time == new_message.out_time) &
        (Beacon.min_dist == new_message.min_dist) &
        (Beacon.min_time == new_message.min_time)).count()
    if duplicate:
        return "<h1>Error</h1>", 400
    db.session.add(new_message)
    # commit explicitly instead of relying on the deprecated
    # SQLALCHEMY_COMMIT_ON_TEARDOWN behaviour (the PUT handlers
    # already commit explicitly)
    db.session.commit()
    return "<h1>Ok</h1>", 200
@app.route('/api/messages/<int:id>', methods=['PUT'])
def update_message(id):
    """Update the Beacon record with the given id from the JSON body.

    Returns 200 on success, 404 when the record does not exist, and
    400 for a missing or unparsable payload (the original bare
    ``except`` returned 404 for every failure, including bad data).
    """
    content = request.get_json(silent=True, force=False)
    if not content:
        return "<h1>Error</h1>", 400
    message = Beacon.query.filter(Beacon.id == id).first()
    if message is None:
        return "<h1>Error</h1>", 404
    try:
        message.raspi_serial = content.get('raspi_serial')
        message.ibeacon_uuid = content.get('ibeacon_uuid')
        message.ibeacon_major = content.get('ibeacon_major')
        message.ibeacon_minor = content.get('ibeacon_minor')
        message.in_time = parser.parse(content.get('in_time'))
        message.out_time = parser.parse(content.get('out_time'))
        message.min_dist = int(content.get('min_dist'))
        message.min_time = parser.parse(content.get('min_time'))
        db.session.commit()
    except (TypeError, ValueError):
        # unparsable date/number in the payload — undo partial changes
        db.session.rollback()
        return "<h1>Error</h1>", 400
    return "<h1>Ok</h1>", 200
@app.route('/api/messages/<int:id>', methods=['DELETE'])
def delete_message(id):
    """Delete the Beacon record with the given id.

    Returns 404 when no such record exists.  The original bare
    ``except`` could never fire: ``Query.delete`` returns the number
    of deleted rows (0 for a missing id) rather than raising.
    """
    deleted = Beacon.query.filter(Beacon.id == id).delete(synchronize_session='evaluate')
    if not deleted:
        return "<h1>Error</h1>", 404
    # commit explicitly instead of relying on deprecated COMMIT_ON_TEARDOWN
    db.session.commit()
    return "<h1>Ok</h1>", 200
"""
****************** GATES ******************
"""
@app.route('/api/gates/', methods=['GET'])
def get_gates():
    """Return every stored Gate record as a JSON list."""
    gates = Gate.query.all()
    payload = [gate.serialize for gate in gates]
    return jsonify(payload), 200
@app.route('/api/gates/', methods=['POST'])
def add_gate():
    """Create a new Gate from the JSON request body.

    Returns 200 on success, 400 for a missing payload or an exact
    duplicate of an already stored gate.
    """
    content = request.get_json(silent=True, force=False)
    if not content:
        return "<h1>Error</h1>", 400
    new_gate = Gate(raspi_serial_left=content.get('raspi_serial_left'),
                    raspi_serial_right=content.get('raspi_serial_right'),
                    distance=content.get('distance'))
    # reject an exact duplicate of an existing gate
    duplicate = db.session.query(Gate.id).filter(
        (Gate.raspi_serial_left == new_gate.raspi_serial_left) &
        (Gate.raspi_serial_right == new_gate.raspi_serial_right) &
        (Gate.distance == new_gate.distance)).count()
    if duplicate:
        return "<h1>Error</h1>", 400
    db.session.add(new_gate)
    # commit explicitly instead of relying on deprecated COMMIT_ON_TEARDOWN
    db.session.commit()
    return "<h1>Ok</h1>", 200
@app.route('/api/gates/<int:id>', methods=['PUT'])
def update_gate(id):
    """Update the Gate with the given id from the JSON body.

    Returns 200 on success, 404 when no such gate exists, and 400 for
    a missing payload (the original bare ``except`` conflated missing
    record and bad data into one 404).
    """
    content = request.get_json(silent=True, force=False)
    if not content:
        return "<h1>Error</h1>", 400
    gate = Gate.query.filter(Gate.id == id).first()
    if gate is None:
        return "<h1>Error</h1>", 404
    gate.raspi_serial_left = content.get('raspi_serial_left')
    gate.raspi_serial_right = content.get('raspi_serial_right')
    gate.distance = content.get('distance')
    db.session.commit()
    return "<h1>Ok</h1>", 200
"""
****************** EVENTS ******************
"""
@app.route('/api/events/', methods=['GET'])
def get_events():
    """Return every stored Event record as a JSON list."""
    events = Event.query.all()
    payload = [event.serialize for event in events]
    return jsonify(payload), 200
@app.route('/api/events/', methods=['POST'])
def add_event():
    """Create a new Event from the JSON request body.

    Returns 200 on success, 400 for a missing/unparsable payload or an
    exact duplicate of an already stored event.
    """
    content = request.get_json(silent=True, force=False)
    if not content:
        return "<h1>Error</h1>", 400
    try:
        new_event = Event(gate_id=content.get('gate_id'),
                          ibeacon_uuid=content.get('ibeacon_uuid'),
                          ibeacon_major=content.get('ibeacon_major'),
                          ibeacon_minor=content.get('ibeacon_minor'),
                          in_time=parser.parse(content.get('in_time')),
                          out_time=parser.parse(content.get('out_time')),
                          course=content.get('course'))
    except (TypeError, ValueError):
        # missing or unparsable timestamp — the original crashed with a 500
        return "<h1>Error</h1>", 400
    # reject an exact duplicate of an existing event
    duplicate = db.session.query(Event.id).filter(
        (Event.gate_id == new_event.gate_id) &
        (Event.ibeacon_uuid == new_event.ibeacon_uuid) &
        (Event.ibeacon_major == new_event.ibeacon_major) &
        (Event.ibeacon_minor == new_event.ibeacon_minor) &
        (Event.in_time == new_event.in_time) &
        (Event.out_time == new_event.out_time) &
        (Event.course == new_event.course)).count()
    if duplicate:
        return "<h1>Error</h1>", 400
    db.session.add(new_event)
    # commit explicitly instead of relying on deprecated COMMIT_ON_TEARDOWN
    db.session.commit()
    return "<h1>Ok</h1>", 200
@app.route('/api/events/<int:id>', methods=['PUT'])
def update_event(id):
    """Update the Event with the given id from the JSON body.

    Returns 200 on success, 404 when the event does not exist, and
    400 for a missing or unparsable payload (the original bare
    ``except`` returned 404 for every failure).
    """
    content = request.get_json(silent=True, force=False)
    if not content:
        return "<h1>Error</h1>", 400
    event = Event.query.filter(Event.id == id).first()
    if event is None:
        return "<h1>Error</h1>", 404
    try:
        event.gate_id = content.get('gate_id')
        event.ibeacon_uuid = content.get('ibeacon_uuid')
        event.ibeacon_major = content.get('ibeacon_major')
        event.ibeacon_minor = content.get('ibeacon_minor')
        event.in_time = parser.parse(content.get('in_time'))
        event.out_time = parser.parse(content.get('out_time'))
        event.course = content.get('course')
        db.session.commit()
    except (TypeError, ValueError):
        # unparsable timestamp in the payload — undo partial changes
        db.session.rollback()
        return "<h1>Error</h1>", 400
    return "<h1>Ok</h1>", 200
@app.route('/api/events/<int:id>', methods=['DELETE'])
def delete_event(id):
    """Delete the Event with the given id.

    Returns 404 when no such event exists; ``Query.delete`` returns a
    row count rather than raising, so the original bare ``except``
    was dead code.
    """
    deleted = Event.query.filter(Event.id == id).delete(synchronize_session='evaluate')
    if not deleted:
        return "<h1>Error</h1>", 404
    # commit explicitly instead of relying on deprecated COMMIT_ON_TEARDOWN
    db.session.commit()
    return "<h1>Ok</h1>", 200
"""
****************** TABLES ******************
"""
@app.route('/messages', methods=['GET'])
def messages():
    """Render the HTML table page for raw beacon messages."""
    return render_template("messages.html")
@app.route('/gates/', methods=['GET'])
def gates():
    """Render the HTML table page for gates."""
    return render_template("gates.html")
@app.route('/', methods=['GET'])
def events():
    """Render the HTML table page for events (site landing page)."""
    return render_template("events.html")
"""
****************** UTILS ******************
"""
@app.route('/api/gates/<int:id>', methods=['DELETE'])
def delete_gate(id):
    """Delete the Gate with the given id.

    Returns 404 when no such gate exists; ``Query.delete`` returns a
    row count rather than raising, so the original bare ``except``
    was dead code.
    """
    deleted = Gate.query.filter(Gate.id == id).delete(synchronize_session='evaluate')
    if not deleted:
        return "<h1>Error</h1>", 404
    # commit explicitly instead of relying on deprecated COMMIT_ON_TEARDOWN
    db.session.commit()
    return "<h1>Ok</h1>", 200
@app.route('/api/collect_items/', methods=['GET'])
def process_overlapps():
    """
    Find overlapping events in Beacon model for all gates from Pair.
    If found, calculate course and store to Event.

    Equivalent SQL of the overlap query:
    select b1.raspi_serial raspi_one, b1.min_dist dist_one, b2.min_dist dist_two, b1.ibeacon_uuid, b1.ibeacon_major, b1.ibeacon_minor, b1.in_time, b2.out_time
    from beacons b1
    inner join beacons b2
    on b1.ibeacon_uuid = b2.ibeacon_uuid
    and b1.ibeacon_major = b2.ibeacon_major
    and b1.ibeacon_minor = b2.ibeacon_minor
    and b1.in_time < b2.in_time
    and b1.out_time > b2.in_time
    where b1.raspi_serial = <left serial> or b1.raspi_serial = <right serial>
    """
    try:
        gates = Gate.query.all()  # List of all available gates
        for gate in gates:
            # aliases: SQLAlchemy cannot join a table to itself directly
            b1 = aliased(Beacon)
            b2 = aliased(Beacon)
            query = db.session.query(b1.raspi_serial.label('raspi_one'),
                                     b1.min_dist.label('dist_one'),
                                     b2.min_dist.label('dist_two'),
                                     b1.ibeacon_uuid,
                                     b1.ibeacon_major,
                                     b1.ibeacon_minor,
                                     b1.in_time,
                                     b2.out_time,
                                     b1.min_time.label('time_one'),
                                     b2.min_time.label('time_two'))
            # restrict to records produced by the two agents of this gate
            query = query.filter((b1.raspi_serial == gate.raspi_serial_left) |
                                 (b1.raspi_serial == gate.raspi_serial_right))
            # main query: overlapping time intervals for each i-beacon
            query = query.join(b2, (b1.in_time < b2.in_time) &
                               (b1.out_time > b2.in_time) &
                               (b1.ibeacon_uuid == b2.ibeacon_uuid) &
                               (b1.ibeacon_major == b2.ibeacon_major) &
                               (b1.ibeacon_minor == b2.ibeacon_minor))
            records = query.all()
            for record in records:
                # Decide which side of THIS gate produced the first
                # sighting.  (The original compared against the left
                # serial of the first gate row in the table, so every
                # gate after the first classified its sides wrongly.)
                if record.raspi_one == gate.raspi_serial_left:
                    dist_left = record.dist_one
                    time_left = record.time_one
                    dist_right = record.dist_two
                    time_right = record.time_two
                else:
                    dist_right = record.dist_one
                    time_right = record.time_one
                    dist_left = record.dist_two
                    time_left = record.time_two
                # Classify the course from the two closest distances
                if dist_left > gate.distance or dist_right > gate.distance:
                    course = 'wide'
                elif dist_left <= dist_right // 2:
                    course = 'left'
                elif dist_right <= dist_left // 2:
                    course = 'right'
                else:
                    course = 'center'
                new_event = Event(gate_id=gate.id,
                                  ibeacon_uuid=record.ibeacon_uuid,
                                  ibeacon_major=record.ibeacon_major,
                                  ibeacon_minor=record.ibeacon_minor,
                                  in_time=record.in_time,
                                  out_time=record.out_time,
                                  min_time_left=time_left,
                                  min_time_right=time_right,
                                  course=course)
                # skip exact duplicates already stored
                if db.session.query(Event.id).filter((Event.gate_id == new_event.gate_id) &
                                                     (Event.ibeacon_uuid == new_event.ibeacon_uuid) &
                                                     (Event.ibeacon_major == new_event.ibeacon_major) &
                                                     (Event.ibeacon_minor == new_event.ibeacon_minor) &
                                                     (Event.in_time == new_event.in_time) &
                                                     (Event.out_time == new_event.out_time) &
                                                     (Event.min_time_left == new_event.min_time_left) &
                                                     (Event.min_time_right == new_event.min_time_right) &
                                                     (Event.course == new_event.course)).count() == 0:
                    db.session.add(new_event)
        # commit explicitly instead of relying on deprecated COMMIT_ON_TEARDOWN
        db.session.commit()
    except Exception:
        db.session.rollback()
        return "<h1>Error</h1>", 400
    return "<h1>Ok</h1>", 200
@app.errorhandler(404)
def page_not_found(e):
    "404 error handler"
    # e is the exception Flask raised for the unmatched route.
    return "<h1>Error 404</h1>", 404
if __name__ == "__main__":
    # NOTE(review): binds the Flask development server to 0.0.0.0:80 —
    # reachable from the whole network; fine for a demo, not production.
    app.run(debug=False, host='0.0.0.0', port=80)
|
from collections import defaultdict
class Solution:
    def diagonalSort(self, mat: list[list[int]]) -> list[list[int]]:
        """Sort each top-left -> bottom-right diagonal of mat ascending.

        Cells on the same diagonal share the key i - j.  Collect each
        diagonal, sort it descending, then pop from the tail (O(1))
        while refilling in row-major order, which writes the values
        back in ascending order along every diagonal.

        Note: the original annotated with ``List`` without importing it
        from ``typing``, raising NameError at class-creation time; the
        builtin ``list`` generic (Python 3.9+) is used instead.
        """
        d = defaultdict(list)
        m, n = len(mat), len(mat[0])
        res = [[0] * n for _ in range(m)]
        for i in range(m):
            for j in range(n):
                d[i - j].append(mat[i][j])
        for k in d:
            d[k].sort(reverse=True)
        for i in range(m):
            for j in range(n):
                res[i][j] = d[i - j].pop()
        return res
|
# pylint: disable=invalid-name,protected-access
import pytest
from allennlp.common.testing import ModelTestCase
from allennlp.common.checks import ConfigurationError
from allennlp.common.params import Params
from allennlp.models import Model
class BiattentiveClassificationNetworkMaxoutTest(ModelTestCase):
    """Round-trip and configuration tests for the biattentive
    classification network (maxout variant) on the SST fixture."""
    def setUp(self):
        super(BiattentiveClassificationNetworkMaxoutTest, self).setUp()
        self.set_up_model('tests/fixtures/biattentive_classification_network/experiment.json',
                          'tests/fixtures/data/sst.txt')
    def test_maxout_bcn_can_train_save_and_load(self):
        """Train/save/load round-trip with the maxout experiment config."""
        self.ensure_model_can_train_save_and_load(self.param_file)
    def test_feedforward_bcn_can_train_save_and_load(self):
        """Same round-trip, but with the feedforward experiment config."""
        # pylint: disable=line-too-long
        self.ensure_model_can_train_save_and_load('tests/fixtures/biattentive_classification_network/feedforward_experiment.json')
    def test_batch_predictions_are_consistent(self):
        self.ensure_batch_predictions_are_consistent()
    def test_mismatching_dimensions_throws_configuration_error(self):
        """A wrong encoder input_size must be rejected at construction."""
        params = Params.from_file(self.param_file)
        # Make the encoder wrong - it should be 2 to match
        # the embedding dimension from the text_field_embedder.
        params["model"]["encoder"]["input_size"] = 10
        # NOTE(review): positional (vocab, params) matches the older
        # AllenNLP Model.from_params signature — confirm against the
        # pinned allennlp version.
        with pytest.raises(ConfigurationError):
            Model.from_params(self.vocab, params.pop("model"))
|
import pytz
from datetime import datetime
from mwd.settings import STAGE, TIME_ZONE
def stage(request):
    """Context processor: expose the deployment stage to templates."""
    return dict(stage=STAGE)
def year(request):
    """Context processor: expose the current year in the configured zone."""
    now = datetime.now(pytz.timezone(TIME_ZONE))
    return dict(year=now.year)
def recaptcha_site_key(request):
    """Context processor: expose the reCAPTCHA v2/v3 site keys."""
    keys = {}
    keys['reCAPTCHA_v2_site_key'] = '6LcM4LQZAAAAAOXJ-CSlz-JrFWKSEl6zfOBmB4Hg'
    keys['reCAPTCHA_v3_site_key'] = '6Lco1LQZAAAAADGDVe8bUysPItw45Limpm_ZOUC2'
    return keys
|
import sys
import os
import pymongo
import json
import datetime
# important values
block_max = 4369999  # highest block height covered by the extracted dataset
quadrillion = 1000000000000000000  # NOTE(review): this is 10**18 (wei per ether), not 10**15 — name is misleading
# connecting to mongo to get collection
# NOTE: runs at import time — importing this module opens the connection.
client = pymongo.MongoClient(serverSelectionTimeoutMS=1000)
collection = client["blockchainExtended2"]["blocks"]
def retrieveTransactionValueMinMaxAvg():
    """Scan every transaction and record cumulated / max / min stats.

    Unwinds all transactions from the blocks collection, accumulates
    value, gas, gasPrice and input length, and writes the result to
    timeseries/minMaxAvg.json.
    """
    # query: one document per transaction
    txs = collection.aggregate([{"$unwind": "$transactions"}])
    commulated = {"value": 0.0, "gas": 0, "gasPrice": 0, "input": 0}
    maximum = {"value": 0, "gas": 0, "gasPrice": 0, "input": 0}
    # sentinel minima, chosen above any realistic field value
    minimum = {"value": 100000000, "gas": 10000000000,
               "gasPrice": 100000000000000, "input": 100000000}
    for tx in txs:
        txn = tx["transactions"]
        # Hoist the hex->int conversions: the original parsed each hex
        # field three times per transaction (cumulate, max and min).
        value = txn["value"]
        gas = int(txn["gas"], 16)
        gas_price = int(txn["gasPrice"], 16)
        input_len = len(txn["input"])
        commulated["value"] += value
        commulated["gas"] += gas
        commulated["gasPrice"] += gas_price
        commulated["input"] += input_len
        maximum["value"] = max(value, maximum["value"])
        maximum["gas"] = max(gas, maximum["gas"])
        maximum["gasPrice"] = max(gas_price, maximum["gasPrice"])
        maximum["input"] = max(input_len, maximum["input"])
        minimum["value"] = min(value, minimum["value"])
        minimum["gas"] = min(gas, minimum["gas"])
        minimum["gasPrice"] = min(gas_price, minimum["gasPrice"])
        minimum["input"] = min(input_len, minimum["input"])
    # save to json
    output = {"commulated": commulated, "maximums": maximum, "minimums": minimum}
    with open("timeseries/minMaxAvg.json", "w") as outfile:
        json.dump(output, outfile)
def calculateBlockDataFieldsIntervalAverages():
    """Average block-level fields over consecutive two-week intervals.

    Writes per-interval averages (keyed by interval end timestamp) to
    timeseries/blockValueAverages.json.
    """
    # TWO WEEKS in SECONDS (variable name is historical)
    fourWeeks = 1209600
    interval = fourWeeks
    # unix timestamp of the first interval start
    startTime = 1438269988
    intervalsCommulated = {}
    # loop over 2 week intervals
    while startTime < 1508340388:
        print(datetime.datetime.fromtimestamp(startTime).strftime("%Y-%m-%d %H:%M:%S"))
        blocksInInterval = collection.aggregate(
            [
                {
                    "$match": {
                        "$and": [
                            {"timestamp": {"$lt": (startTime + interval)}},
                            {"timestamp": {"$gte": startTime}},
                        ]
                    }
                }
            ]
        )
        startTime += interval
        commulatedSize = 0
        commulatedDifficulty = int(0)
        commulatedGasUsed = 0
        commulatedGasLimit = 0
        commulatedDollarPrice = 0.0
        commulatedNumberOfTransactions = 0
        blocks = 0
        # loop over all blocks within the two weeks
        for block in blocksInInterval:
            commulatedSize += block["size"]
            commulatedDifficulty += block["difficulty"]
            commulatedGasUsed += block["gasUsed"]
            commulatedGasLimit += block["gasLimit"]
            commulatedDollarPrice += block["dollarPrice"]
            commulatedNumberOfTransactions += len(block["transactions"])
            blocks += 1
        # skip empty intervals: the original divided by zero when no
        # block fell inside the window
        if blocks == 0:
            continue
        # calculate averages
        intervalsCommulated[startTime] = {
            "size": commulatedSize / blocks,
            "difficulty": commulatedDifficulty / blocks,
            "gasUsed": commulatedGasUsed / blocks,
            "gasLimit": commulatedGasLimit / blocks,
            "dollarPrice": commulatedDollarPrice / blocks,
            "numberOfTransactions": commulatedNumberOfTransactions / blocks,
        }
    # save to json
    with open("timeseries/blockValueAverages.json", "w") as outfile:
        json.dump(intervalsCommulated, outfile)
def calculateTxnDataFieldsIntervalAverages():
    """Average transaction-level fields over two-week intervals.

    Writes per-interval averages to timeseries/transactionValueAverages.json.
    """
    # TWO WEEKS in SECONDS (variable name is historical)
    fourWeeks = 1209600
    interval = fourWeeks
    # unix timestamp of the first interval start
    startTime = 1438269988
    intervalsCommulated = {}
    while startTime < 1508340388:
        print(datetime.datetime.fromtimestamp(startTime).strftime("%Y-%m-%d %H:%M:%S"))
        txsInInterval = collection.aggregate(
            [
                {
                    "$match": {
                        "$and": [
                            {"timestamp": {"$lt": (startTime + interval)}},
                            {"timestamp": {"$gte": startTime}},
                        ]
                    }
                },
                {"$unwind": "$transactions"},
            ]
        )
        startTime += interval
        commulatedInput = 0
        commulatedGas = 0
        commulatedGasPrice = 0
        commulatedValue = 0.0
        blocks = 0  # counts transactions here, despite the name
        for tx in txsInInterval:
            commulatedInput += len(tx["transactions"]["input"])
            commulatedGas += int(tx["transactions"]["gas"], 16)
            commulatedGasPrice += int(tx["transactions"]["gasPrice"], 16)
            commulatedValue += tx["transactions"]["value"]
            blocks += 1
        # skip empty intervals: the original divided by zero when no
        # transaction fell inside the window
        if blocks == 0:
            continue
        intervalsCommulated[startTime] = {
            "input": commulatedInput / blocks,
            "gas": commulatedGas / blocks,
            "gasPrice": commulatedGasPrice / blocks,
            "value": commulatedValue / blocks,
        }
    with open("timeseries/transactionValueAverages.json", "w") as outfile:
        json.dump(intervalsCommulated, outfile)
# Run all three aggregations.  NOTE(review): these fire at module
# level, so merely importing this file triggers the full computation.
retrieveTransactionValueMinMaxAvg()
calculateBlockDataFieldsIntervalAverages()
calculateTxnDataFieldsIntervalAverages()
|
import os
from os import listdir
from os.path import isfile, join
import gc
import sys
from sys import getsizeof
from math import floor
import pickle
import timeit
import getopt
from functools import reduce
from itertools import repeat
from multiprocessing import Pool
from numba import njit
import numpy as np
import tensorflow as tf
from datahelper_noflag import *
from tensorflow.keras.datasets import mnist, cifar10, cifar100
from sHAM import huffman
from sHAM import sparse_huffman
from sHAM import sparse_huffman_only_data
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
exec(open("../performance_eval/GPU.py").read())
def make_word_with_info(matr, encoded, bit_words_machine=64):
    """Pack a column-major matrix of bit strings into machine words.

    Walks `encoded` in Fortran (column) order, concatenating each cell's
    bit string and emitting fixed-width words.  `info` records, for the
    start of every column, the (word index, bit offset) where that
    column begins, plus one final entry for the end of the stream.

    :param matr: matrix whose shape[0] delimits column boundaries.
    :param encoded: array of bit strings (assumed same column count —
        TODO confirm against callers).
    :param bit_words_machine: word width in bits.
    :return: (list of word-sized bit strings, list of (word, offset) pairs).
    """
    bit = bit_words_machine
    list_string_col =[]
    string = ''
    row = 0
    info = []
    for x in np.nditer(encoded, order='F'):
        if row == 0:
            # start of a new column: remember where it begins
            info += [(len(list_string_col), len(string))]
        # np.array2string wraps the value in quotes; strip them
        string += (np.array2string(x)[1:-1])
        if len(string) > bit:
            # emit full words, keep the overflowing tail
            bit_overflow = len(string)%bit
            list_string_col += [string[:-bit_overflow]]
            string = string[-bit_overflow:]
        elif len(string) == bit:
            list_string_col += [string]
            string = ''
        row += 1
        if row >= matr.shape[0]:
            row = 0
    bit_remain = len(string)
    # final sentinel entry marking the end of the packed stream
    info += [(len(list_string_col), bit_remain)]
    if bit_remain > 0:
        string += "0"*(bit-bit_remain) # zero-pad the last word to full width
        list_string_col += [string]
    return list_string_col, info
def prepare_interaction_pairs(XD, XT, Y, rows, cols):
    """Assemble (drug, target, affinity) training triples.

    For every index pair (rows[i], cols[i]) the matching drug and
    target representations are collected and stacked, together with
    the affinity label Y[rows[i], cols[i]].
    """
    drugs = [XD[r] for r in rows]
    targets = [XT[c] for c in cols]
    affinity = [Y[r, c] for r, c in zip(rows, cols)]
    drug_data = np.stack(drugs)
    target_data = np.stack(targets)
    return drug_data, target_data, affinity
def dict_space(dict_):
    """Estimate the storage footprint in bytes of a codebook dict."""
    total = 0
    for key in dict_:
        # string payload (total size minus empty-string overhead) + terminator
        total += getsizeof(key) - getsizeof("") + 1
        total += 4  # float32 value
        total += 8  # dict-entry bookkeeping
    return total
def dense_space(npmatr2d):
    """Return the storage size in bytes of a dense 2-D weight matrix.

    float64 counts 8 bytes per entry, float32 counts 4; any other
    dtype falls back to 4 bytes per entry.
    """
    n_entries = npmatr2d.shape[0] * npmatr2d.shape[1]
    if npmatr2d.dtype == np.float64:
        return n_entries * 64 / 8
    if npmatr2d.dtype == np.float32:
        return n_entries * 32 / 8
    # unknown dtype: assume 32-bit entries
    return n_entries * 32 / 8
def cnn_space(npmatr2d):
    """Return the storage size in bytes of a convolutional weight tensor.

    Mirrors ``dense_space``: float64 is 8 bytes/entry, float32 is 4,
    and any other dtype falls back to 4 bytes/entry.  The original
    raised UnboundLocalError for any dtype other than float32/float64
    because ``byte`` was never assigned.
    """
    if npmatr2d.dtype == np.float64:
        byte = 64/8
    elif npmatr2d.dtype == np.float32:
        byte = 32/8
    else:
        # fallback consistent with dense_space's default branch
        byte = 32/8
    return npmatr2d.size * byte
def list_of_dense_indexes(model):
    """Return the positions of Dense layers in model.layers.

    Scanning starts at index 2, matching the companion function
    list_of_dense_weights_indexes.
    """
    index_denses = []
    for idx, layer in enumerate(model.layers[2:], start=2):
        if type(layer) is tf.keras.layers.Dense:
            index_denses.append(idx)
    return index_denses
def make_model_pre_post_dense(model, index_dense):
    """Return a submodel spanning from the network input up to the
    output of the layer immediately before ``index_dense``."""
    submodel = tf.keras.Model(inputs=model.input,
                              outputs=model.layers[index_dense-1].output)
    return submodel
def list_of_dense_weights_indexes(list_weights):
    """Return the indexes (starting from 2) of 2-D arrays in the
    weights list — these are the dense-layer kernels."""
    return [idx for idx, w in enumerate(list_weights[2:], start=2)
            if len(w.shape) == 2]
def list_of_cnn_and_dense_weights_indexes(list_weights):
    """Return (cnn_indexes, dense_indexes) for the two supported networks.

    The layout is recognised by the total number of weight arrays.
    Raises ValueError for an unsupported layout — the original fell
    through and returned None, which callers immediately unpack,
    producing an opaque TypeError.
    """
    # Deep DTA
    if len(list_weights) == 22:
        return [2, 4, 6, 8, 10, 12], [14, 16, 18, 20]
    # VGG19
    if len(list_weights) == 114:
        return [0, 6, 12, 18, 24, 30, 36, 42, 48, 54, 60, 66, 72, 78, 84, 90], [96, 102, 108]
    raise ValueError("unsupported weight layout: %d arrays" % len(list_weights))
def make_huffman(model, lodi, lodwi, lw):
    """Huffman-encode all dense weight matrices with one shared codebook.

    :return: (space_dense, space_huffman) — bytes of the uncompressed
        dense layers vs. the encoded words plus the decoding dict.

    NOTE(review): the ``model`` and ``lodi`` parameters are unused here;
    kept for signature symmetry with make_huffman_sparse_par.
    """
    bit_words_machine = 64  # machine word width used for bit packing
    vect_weights = [np.hstack(lw[i]).reshape(-1,1) for i in lodwi]
    all_vect_weights = np.concatenate(vect_weights, axis=None).reshape(-1,1)
    # one shared symbol-frequency table across every dense layer
    symb2freq = huffman.dict_elem_freq(all_vect_weights)
    e = huffman.encode(symb2freq)
    d_rev = huffman.reverse_elements_list_to_dict(e)
    d = dict(e)
    encodeds = [huffman.matrix_with_code(lw[l], d, d_rev) for l in lodwi]
    list_bins = [huffman.make_words_list_to_int(encoded, bit_words_machine) for encoded in encodeds]
    int_from_strings = [huffman.convert_bin_to_int(list_bin) for list_bin in list_bins]
    space_dense = sum([dense_space(lw[i]) for i in lodwi])
    # compressed size: decoding dictionary + one machine word per packed int
    space_huffman = dict_space(d_rev)
    space_huffman += sum([bit_words_machine/8 * (len(int_from_string)) for int_from_string in int_from_strings])
    return space_dense, space_huffman
def space_for_row_cum(matr, list_):
    """Bytes needed to store `list_` using the smallest unsigned int
    width able to index the rows of `matr` (1, 2, 4 or 8 bytes)."""
    rows = matr.shape[0]
    for bits, width in ((8, 1), (16, 2), (32, 4)):
        if rows < 2 ** bits:
            return width * len(list_)
    return 8 * len(list_)
def make_huffman_sparse_par(model, lodi, lodwi, lw):
    """Sparse-Huffman-encode the dense layers and compare sizes.

    Builds one shared codebook over the NON-zero weights only, then
    encodes each dense matrix in CSC-like sparse form via
    sparse_huffman_only_data.do_all_for_me.

    :return: (space_dense, space_sparse_huffman) in bytes.

    NOTE(review): ``model``, ``lodi`` and the local ``dense_inputs``
    are unused here.
    """
    bit_words_machine = 64  # machine word width used for bit packing
    vect_weights = [np.hstack(lw[i]).reshape(-1,1) for i in lodwi]
    all_vect_weights = np.concatenate(vect_weights, axis=None).reshape(-1,1)
    # codebook over non-zero symbols only — zeros are implicit in the
    # sparse representation
    symb2freq = huffman.dict_elem_freq(all_vect_weights[all_vect_weights != 0])
    e = huffman.encode(symb2freq)
    d_rev = huffman.reverse_elements_list_to_dict(e)
    d = dict(e)
    dense_inputs = []
    space_dense = 0
    space_sparse_huffman = 0
    space_sparse_huffman += dict_space(d_rev)
    for l in lodwi:
        matr_shape, int_data, d_rev_data, row_index, cum, expected_c, min_length_encoded = sparse_huffman_only_data.do_all_for_me(lw[l], bit_words_machine)
        # data, row_index, cum = sparse_huffman.convert_dense_to_csc(lw[l])
        # print(len(cum), len(row_index))
        # d_data, d_rev_data = sparse_huffman_only_data.huffman_sparse_encoded_dict(data)
        # data_encoded = sparse_huffman_only_data.encoded_matrix(data, d_data, d_rev_data)
        # int_from_strings = huffman.convert_bin_to_int(huffman.make_words_list_to_int(data_encoded, bit_words_machine))
        space_dense += dense_space(lw[l])
        # packed data words + the row/column index structures
        space_sparse_huffman += bit_words_machine/8 * len(int_data)
        space_sparse_huffman += space_for_row_cum(lw[l], cum) + space_for_row_cum(lw[l], row_index)
    return space_dense, space_sparse_huffman
def split_filename(fn):
    """Parse (pruning, ws, accuracy) components out of a weights file
    name whose fields are separated by '-'.

    Two fields mean no pruning component (0 is substituted); five
    fields interleave values at positions 0, 2 and 4.
    """
    parts = fn.split(sep="-")
    count = len(parts)
    if count == 2:
        return 0, parts[0], parts[1]
    if count == 3:
        return parts[0], parts[1], parts[2]
    if count == 5:
        return parts[0], parts[2], parts[4]
# Get the arguments from the command-line except the filename
argv = sys.argv[1:]
try:
    string_error = 'usage: testing_time_space.py -t <type of compression> -d <directory of compressed weights> -m <file original keras model>'
    # Define the getopt parameters
    opts, args = getopt.getopt(argv, 't:d:m:s:q:', ['type', 'directory', 'model', 'dataset', 'quant'])
    if len(opts) != 5:
        print (string_error)
    # Iterate the options and get the corresponding values
    else:
        for opt, arg in opts:
            if opt == "-t":
                # type of compression (ham / sham / all / also_cnn / only_cnn)
                type_compr = arg
            elif opt == "-d":
                # directory containing the compressed weight files
                directory = arg
            elif opt == "-m":
                # original keras model file
                model_file = arg
            elif opt == "-q":
                # getopt option values are strings: the original compared
                # `arg == 1` (int), which made pq unconditionally False
                pq = arg == "1"
            elif opt == "-s":
                if arg == "kiba":
                    # data loading
                    dataset_path = '../performance_eval/DeepDTA/data_utils/kiba/'
                    dataset = DataSet(fpath=dataset_path,     # TODO: take from args
                                      setting_no=1,           # TODO: take from args
                                      seqlen=1000,
                                      smilen=100,
                                      need_shuffle=False)
                    XD, XT, Y = dataset.parse_data(dataset_path, 0)
                    XD = np.asarray(XD)
                    XT = np.asarray(XT)
                    Y = np.asarray(Y)
                    test_set, outer_train_sets = dataset.read_sets(dataset_path, 1)
                    flat_list = [item for sublist in outer_train_sets for item in sublist]
                    label_row_inds, label_col_inds = np.where(np.isnan(Y) == False)
                    trrows = label_row_inds[flat_list]
                    trcol = label_col_inds[flat_list]
                    drug, targ, aff = prepare_interaction_pairs(XD, XT, Y, trrows, trcol)
                    trrows = label_row_inds[test_set]
                    trcol = label_col_inds[test_set]
                    drug_test, targ_test, aff_test = prepare_interaction_pairs(XD, XT, Y, trrows, trcol)
                    x_train = [np.array(drug), np.array(targ)]
                    y_train = np.array(aff)
                    x_test = [np.array(drug_test), np.array(targ_test)]
                    y_test = np.array(aff_test)
                elif arg == "davis":
                    # data loading
                    dataset_path = '../performance_eval/DeepDTA/data_utils/davis/'
                    dataset = DataSet(fpath=dataset_path,     # TODO: take from args
                                      setting_no=1,           # TODO: take from args
                                      seqlen=1200,
                                      smilen=85,
                                      need_shuffle=False)
                    XD, XT, Y = dataset.parse_data(dataset_path, 1)
                    XD = np.asarray(XD)
                    XT = np.asarray(XT)
                    Y = np.asarray(Y)
                    test_set, outer_train_sets = dataset.read_sets(dataset_path, 1)
                    flat_list = [item for sublist in outer_train_sets for item in sublist]
                    label_row_inds, label_col_inds = np.where(np.isnan(Y) == False)
                    trrows = label_row_inds[flat_list]
                    trcol = label_col_inds[flat_list]
                    drug, targ, aff = prepare_interaction_pairs(XD, XT, Y, trrows, trcol)
                    trrows = label_row_inds[test_set]
                    trcol = label_col_inds[test_set]
                    drug_test, targ_test, aff_test = prepare_interaction_pairs(XD, XT, Y, trrows, trcol)
                    x_train = [np.array(drug), np.array(targ)]
                    y_train = np.array(aff)
                    x_test = [np.array(drug_test), np.array(targ_test)]
                    y_test = np.array(aff_test)
                elif arg == "mnist":
                    # data loading
                    ((x_train, y_train), (x_test, y_test)) = mnist.load_data()
                    x_train = x_train.reshape((x_train.shape[0], 28, 28, 1))
                    x_test = x_test.reshape((x_test.shape[0], 28, 28, 1))
                    x_train = x_train.astype("float32") / 255.0
                    x_test = x_test.astype("float32") / 255.0
                    y_train = tf.keras.utils.to_categorical(y_train, 10)
                    y_test = tf.keras.utils.to_categorical(y_test, 10)
                elif arg == "cifar10":
                    # data loading
                    num_classes = 10
                    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
                    # the original called tf.keras.utils.utils.to_categorical
                    # (double "utils"), an AttributeError at runtime
                    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
                    y_test = tf.keras.utils.to_categorical(y_test, num_classes)
                    x_train = x_train.astype('float32')
                    x_test = x_test.astype('float32')
                    # data preprocessing: subtract per-channel means
                    x_train[:,:,:,0] = (x_train[:,:,:,0]-123.680)
                    x_train[:,:,:,1] = (x_train[:,:,:,1]-116.779)
                    x_train[:,:,:,2] = (x_train[:,:,:,2]-103.939)
                    x_test[:,:,:,0] = (x_test[:,:,:,0]-123.680)
                    x_test[:,:,:,1] = (x_test[:,:,:,1]-116.779)
                    x_test[:,:,:,2] = (x_test[:,:,:,2]-103.939)
                elif arg == "cifar100":
                    # data loading
                    num_classes = 100
                    (x_train, y_train), (x_test, y_test) = cifar100.load_data()
                    x_train = x_train.astype('float32')
                    x_test = x_test.astype('float32')
                    # standardise with the training-set statistics
                    mean = np.mean(x_train, axis=(0, 1, 2, 3))
                    std = np.std(x_train, axis=(0, 1, 2, 3))
                    x_train = (x_train-mean)/(std+1e-7)
                    x_test = (x_test-mean)/(std+1e-7)
                    y_train = tf.keras.utils.to_categorical(y_train, num_classes)
                    y_test = tf.keras.utils.to_categorical(y_test, num_classes)
                    x_train = x_train.astype('float32')
                    x_test = x_test.astype('float32')
except getopt.GetoptError:
    # Print something useful
    print (string_error)
    sys.exit(2)
# Load the reference model and measure its baseline test performance.
model = tf.keras.models.load_model(model_file)
original_acc = model.evaluate(x_test, y_test, verbose=0)
# evaluate returns [loss, metric] when the model was compiled with
# metrics — keep the metric value (presumably accuracy; confirm
# against the model's compile call).
if isinstance(original_acc, list):
    original_acc = original_acc[1]
# every compressed-weights file in the target directory
onlyfiles = [f for f in listdir(directory) if isfile(join(directory, f))]
# per-file result accumulators for the HAM variant
pruning_l_h = []
ws_l_h = []
diff_acc_h = []
space_h = []
space_sh = []
time_h = []
time_h_p = []
time_h_p_cpp = []
nonzero_h = []
perf = []
# per-file result accumulators for the sparse-HAM variant
# (note: space_sh is re-initialised here, shadowing the list above)
pruning_l_sh = []
ws_l_sh = []
diff_acc_sh = []
space_sh = []
time_sh = []
nonzero_sh = []
for weights in sorted(onlyfiles):
print("Applying compression")
gc.collect()
if weights[-3:] == ".h5":
lw = pickle.load(open(directory+weights, "rb"))
model.set_weights(lw)
cnnIdx, denseIdx = list_of_cnn_and_dense_weights_indexes(lw)
if type_compr == "also_cnn" or type_compr == "only_cnn":
space_expanded_cnn = sum([cnn_space(lw[i]) for i in cnnIdx])
else:
space_expanded_cnn = 0
# Estraggo quanti simboli sono usati effettivamente nei liv conv
if type_compr == "also_cnn" or type_compr == "only_cnn":
vect_weights =[np.hstack(lw[i]).reshape(-1,1) for i in cnnIdx]
all_vect_weights = np.concatenate(vect_weights, axis=None).reshape(-1,1)
uniques = np.unique(all_vect_weights)
num_values = len(uniques)
space_compr_cnn = (num_values*32 + math.ceil(np.log2(num_values)) * sum([lw[i].size for i in cnnIdx])) / 8
#type_compr = "all"
else:
space_compr_cnn = space_expanded_cnn
pr, ws, acc = split_filename(weights)
ws_acc = float(acc[:-3])
#print("{}% & {} --> {}".format(pr, ws, ws_acc))
perf += [ws_acc]
lodi = list_of_dense_indexes(model)
lodwi = list_of_dense_weights_indexes(lw)
# for l in lodwi:
# print(lw[l])
# #print(np.unique(lw[l]))
# print(len(np.nonzero(lw[l])[0]))
# print(lw[l].size)
# print()
assert len(lodi) == len(lodwi)
non_zero = []
if type_compr == "ham":
space_dense, space_huffman = make_huffman(model, lodi, lodwi, lw)
elif type_compr == "sham":
space_dense, space_shuffman = make_huffman_sparse_par(model, lodi, lodwi, lw)
elif type_compr in ["all", "also_cnn"]:
space_dense, space_huffman = make_huffman(model, lodi, lodwi, lw)
space_dense, space_shuffman = make_huffman_sparse_par(model, lodi, lodwi, lw)
pruning_l_h.append(pr)
ws_l_h.append(ws)
diff_acc_h.append(round(ws_acc-original_acc, 5))
if type_compr in ["ham", "all", "also_cnn"]:
space_h.append(round((space_compr_cnn + space_huffman)/(space_expanded_cnn + space_dense), 5)) # Tengo conto anche di cnn
if type_compr in ["sham", "all", "also_cnn"]:
space_sh.append(round((space_compr_cnn + space_shuffman)/(space_expanded_cnn + space_dense), 5))
nonzero_h.append(non_zero)
if type_compr in ["only_conv"]:
space_h.append(round((space_compr_cnn)/(space_expanded_cnn)))
print(f"\tCompressed performance test: {perf[-1]}")
if type_compr == "ham":
#print("{} {} acc1, space {} ".format(ws_l_h[-1], diff_acc_h[-1], space_h[-1]))
print(f"\tSpace occupancy of compressed model with HAM: {space_h[-1]}")
elif type_compr == "sham":
### Commentato per salvare solo i tempi, non i rapporti
# print("{} {} acc1, space {}, time p {} time p cpp {} ".format(ws_l_h[-1], diff_acc_h[-1], space_h[-1], time_h_p[-1], time_h_p_cpp[-1]))
#print("{} {} acc1, space {}".format(ws_l_h[-1], diff_acc_h[-1], space_sh[-1]))
print(f"\tSpace occupancy of compressed model with sHAM: {space_sh[-1]}")
####
elif type_compr in ["all", "also_cnn"] :
#print("{} {} acc1, spaceh {}, spacesh {}".format(ws_l_h[-1], diff_acc_h[-1], space_h[-1], space_sh[-1], ))
print(f"\tSpace occupancy of compressed model with HAM: {space_h[-1]}")
print(f"\tSpace occupancy of compressed model with sHAM: {space_sh[-1]}")
#elif type_compr == "only_conv":
### Commentato per salvare solo i tempi, non i rapporti
# print("{} {} acc1, space {}, time p {} time p cpp {} ".format(ws_l_h[-1], diff_acc_h[-1], space_h[-1], time_h_p[-1], time_h_p_cpp[-1]))
#print("{} {} acc1, space {}".format(ws_l_h[-1], diff_acc_h[-1], space_h[-1]))
if type_compr == "ham":
str_res = "results/huffman_upq.txt" if pq else "results/huffman_pruws.txt"
with open(str_res, "a+") as tex:
tex.write(directory)
tex.write("\npruning = {} \nclusters = {}\ndiff_acc = {}\nperf = {}\nspace = {} \n".format(pruning_l_h, ws_l_h, diff_acc_h, perf, space_h))
elif type_compr == "sham":
str_res = "results/huffman_upq.txt" if pq else "results/huffman_pruws_sparse.txt"
with open(str_res, "a+") as tex:
tex.write(directory)
tex.write("\npruning = {} \nclusters = {}\ndiff_acc = {}\nperf = {}\nspace = {}\n".format(pruning_l_h, ws_l_h, diff_acc_h, perf, space_h))
if type_compr in ["all", "also_cnn"]:
str_res = "results/all_upq.txt" if pq else "results/all_pruws.txt"
with open(str_res, "a+") as tex:
tex.write(directory)
tex.write("\npruning = {} \nclusters = {}\ndiff_acc = {}\nperf = {}\nspaceh = {}\nspacesh = {}\n".format(pruning_l_h, ws_l_h, diff_acc_h, perf, space_h, space_sh))
if type_compr == "only_conv":
str_res = "results/all_upq.txt" if pq else "results/all_pruws.txt"
with open(str_res, "a+") as tex:
tex.write(directory)
tex.write("\npruning = {} \nclusters = {}\ndiff_acc = {}\nperf = {}\nspaceh = {}\n".format(pruning_l_h, ws_l_h, diff_acc_h, perf, space_h))
|
import mimetypes
import os
import shutil
import tempfile
import time
from datetime import datetime, timedelta
import pandas as pd
import shapely.wkt
from django.db import connection
from django.db.models import Q
from django.http import FileResponse
from django.shortcuts import render
from django.utils.decorators import method_decorator
from django.views.decorators.cache import cache_page
from django.views.decorators.vary import vary_on_cookie
from paramiko.ssh_exception import AuthenticationException
from rest_framework import permissions, status, viewsets
from rest_framework.exceptions import (APIException, AuthenticationFailed,
NotFound, PermissionDenied)
from rest_framework.response import Response
from rest_framework.views import APIView
from satlomas.renderers import BinaryFileRenderer
from scopes.models import Scope
from jobs.utils import enqueue_job
from .clients import SFTPClient
from .models import Raster, CoverageRaster
from .serializers import (RasterSerializer, ImportSFTPListSerializer,
ImportSFTPSerializer)
# def intersection_area_sql(scope_geom, period):
# mask = Mask.objects.filter(period=period, mask_type='loss').first()
# query = """SELECT ST_Area(a.int) AS area
# FROM (
# SELECT ST_Intersection(
# ST_Transform(ST_GeomFromText('{wkt_scope}', 4326), {srid}),
# ST_Transform(ST_GeomFromText('{wkt_mask}', 4326), {srid})) AS int) a;
# """.format(wkt_scope=scope_geom.wkt, wkt_mask=mask.geom.wkt)
# with connection.cursor() as cursor:
# cursor.execute(query)
# return cursor.fetchall()[0][0]
# def select_mask_areas_by_scope(**params):
# query = """
# SELECT m.id, m.date_to, ST_Area(ST_Transform(
# ST_Intersection(m.geom, s.geom), %(srid)s)) AS area
# FROM (
# SELECT m.id, m.geom, p.date_to
# FROM lomas_changes_mask AS m
# INNER JOIN lomas_changes_period AS p ON m.period_id = p.id
# WHERE p.date_to BETWEEN %(date_from)s AND %(date_to)s AND m.mask_type = %(mask_type)s
# ) AS m
# CROSS JOIN (SELECT geom FROM scopes_scope AS s WHERE s.id = %(scope_id)s) AS s
# """
# with connection.cursor() as cursor:
# cursor.execute(query, dict(srid=32718, mask_type='loss', **params))
# return [
# dict(id=id, date=date, area=area)
# for (id, date, area) in cursor.fetchall()
# ]
# def select_mask_areas_by_geom(**params):
# query = """
# SELECT m.id, m.date_to, ST_Area(ST_Transform(
# ST_Intersection(m.geom, ST_GeomFromText(%(geom_wkt)s, 4326)), %(srid)s)) AS area
# FROM lomas_changes_mask AS m
# INNER JOIN lomas_changes_period AS p ON m.period_id = p.id
# WHERE p.date_to BETWEEN %(date_from)s AND %(date_to)s AND m.mask_type = %(mask_type)s
# """
# with connection.cursor() as cursor:
# cursor.execute(query, dict(srid=32718, mask_type='loss', **params))
# return [
# dict(id=id, date=date, area=area)
# for (id, date, area) in cursor.fetchall()
# ]
# class TimeSeries(APIView):
# permission_classes = [permissions.AllowAny]
# @method_decorator(cache_page(60 * 60 * 24)) # 1 day
# @method_decorator(vary_on_cookie)
# def get(self, request):
# params = request.query_params
# data = {
# k: params.get(k)
# for k in ('scope', 'geom', 'date_from', 'date_to') if k in params
# }
# scope_id = int(data['scope']) if 'scope' in data else None
# custom_geom = data['geom'] if 'geom' in data else None
# if scope_id is None and custom_geom is None:
# raise APIException(
# "Either 'scope' or 'geom' parameters are missing")
# date_from = datetime.strptime(data['date_from'], "%Y-%m-%d")
# date_to = datetime.strptime(data['date_to'], "%Y-%m-%d")
# values = []
# if custom_geom:
# geom = shapely.wkt.loads(custom_geom)
# values = select_mask_areas_by_geom(geom_wkt=geom.wkt,
# date_from=date_from,
# date_to=date_to)
# else:
# values = select_mask_areas_by_scope(scope_id=scope_id,
# date_from=date_from,
# date_to=date_to)
# values = None
# return Response(dict(values=values))
class AvailableDates(APIView):
    """Return first/last/all coverage-raster dates, optionally filtered by type.

    GET parameters:
        type: optional comma-separated list of raster slugs to filter by.

    Response: {first_date, last_date, availables: [{id, date}, ...]} —
    all None/empty when no raster matches.
    """
    permission_classes = [permissions.AllowAny]

    def get(self, request):
        rasters = CoverageRaster.objects.all().order_by('date')
        types = request.query_params.get('type', None)
        if types:
            rasters = rasters.filter(raster__slug__in=types.split(','))
        # Materialize once: the original issued count() + first() + last()
        # + a full iteration — four separate DB queries for the same rows.
        coverage = list(rasters)
        if not coverage:
            return Response(
                dict(first_date=None, last_date=None, availables=[]))
        return Response(dict(
            first_date=coverage[0].date,
            last_date=coverage[-1].date,
            availables=[dict(id=r.id, date=r.date) for r in coverage]))
class RasterViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only endpoint over rasters, newest first.

    Supports optional `from`/`to` (period date match) and `slug` filters.
    """
    queryset = Raster.objects.all().order_by('-created_at')
    serializer_class = RasterSerializer
    permission_classes = [permissions.AllowAny]

    def get_queryset(self):
        qs = self.queryset
        params = self.request.query_params
        date_from = params.get('from')
        date_to = params.get('to')
        # Both bounds must be present for the period filter to apply.
        if date_from is not None and date_to is not None:
            qs = qs.filter(
                Q(period__date_from=date_from) | Q(period__date_to=date_to))
        slug = params.get('slug')
        if slug:
            qs = qs.filter(slug=slug)
        return qs
class RasterDownloadView(APIView):
    """Stream a stored Raster's file to the client as an attachment."""
    renderer_classes = (BinaryFileRenderer, )

    def get(self, request, pk):
        # 404 when the primary key does not resolve to a raster.
        file = Raster.objects.filter(pk=int(pk)).first()
        if not file:
            raise NotFound(detail=None, code=None)
        return self.try_download_file(file)

    def try_download_file(self, file):
        """Copy the storage-backed file to a temp file and stream it back.

        The temp file is deleted when the response closes the handle
        (see the patched close below), or immediately on error.
        """
        # Copy file from storage to a temporary file
        # (file.file is the storage FileField handle — presumably readable
        # without an explicit open(); TODO confirm with the storage backend).
        tmp = tempfile.NamedTemporaryFile(delete=False)
        shutil.copyfileobj(file.file, tmp)
        tmp.close()
        try:
            # Reopen temporary file as binary for streaming download
            stream_file = open(tmp.name, 'rb')
            # Monkey patch .close method so that file is removed after closing it
            # i.e. when response finishes
            original_close = stream_file.close

            def new_close():
                original_close()
                os.remove(tmp.name)

            stream_file.close = new_close
            return FileResponse(stream_file,
                                as_attachment=True,
                                filename=file.name)
        except Exception as err:
            # Make sure to remove temp file
            os.remove(tmp.name)
            raise APIException(err)
class ImportSFTPListView(APIView):
    """List the files at a path on a remote SFTP server."""

    def post(self, request):
        serializer = ImportSFTPListSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        data = serializer.data
        # Default to the SFTP root when no path was supplied.
        remote_path = data['path'] or '/'
        client = SFTPClient(hostname=data['hostname'],
                            port=data['port'],
                            username=data['username'],
                            password=data['password'])
        try:
            entries = client.listdir(remote_path)
        except PermissionError:
            raise PermissionDenied(
                detail=f'Listing {remote_path} not allowed for user')
        except AuthenticationException:
            raise AuthenticationFailed()
        return Response({'values': entries})
class ImportSFTPView(APIView):
    """Enqueue background import jobs for selected files on an SFTP server."""

    def post(self, request):
        serializer = ImportSFTPSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        data = serializer.data
        # Connection details forwarded verbatim to the worker task.
        conn_info = {key: data[key]
                     for key in ('hostname', 'port', 'username', 'password')}
        for entry in data['files']:
            enqueue_job('lomas_changes.tasks.perusat1.import_scene_from_sftp',
                        sftp_conn_info=conn_info,
                        file=entry,
                        queue='processing')
        return Response({}, status=status.HTTP_204_NO_CONTENT)
import math
class CommandTurnLeft:
    """Joystick command that turns the WHILL left until a target angle is met."""

    def __init__(self, whill, measure, angle_deg):
        self.whill = whill
        self.measure = measure
        self.angle_deg = angle_deg

    def run(self):
        """One control tick: True while still turning, False once finished."""
        remaining = math.fabs(self.angle_deg) - math.fabs(self.measure.angle_deg)
        if remaining > 0:
            # Keep rotating: no forward drive, fixed leftward yaw command.
            self.whill.send_joystick(int(0), int(-30))
            return True
        print("done. turn left")
        # Target reached — stop the chair.
        self.whill.send_joystick(int(0), int(0))
        return False
|
"""
Wrapper for the pyproj library
"""
from pyproj import Proj
import pdb
class MyProj(object):
    """Thin wrapper around pyproj.Proj.

    Assists with creation of commonly used projections (UTM and Mercator)
    and provides convenient methods for converting between xy and lon/lat.
    """

    def __init__(self, projstr, utmzone=51, isnorth=False, init=None):
        """Build the projection.

        projstr: full proj string, 'merc' for Mercator, or None for UTM.
        utmzone: UTM zone used when projstr is None.
        isnorth: northern hemisphere flag for the UTM case.
        init:    accepted for backward compatibility; not used (the
                 pyproj `init=` keyword is deprecated).
        """
        if projstr is None:
            # BUGFIX: the original string had a stray comma after "+zone=%d,"
            # which corrupts the proj parameter list.
            projstr = "+proj=utm +zone=%d +ellps=WGS84 +datum=WGS84 +units=m +no_defs" % utmzone
            if not isnorth:
                projstr += ' +south'
        elif projstr.lower() == 'merc':
            # Mercator string
            projstr = '+proj=merc +lon_0=0 +k=1 +x_0=0 +y_0=0 +datum=WGS84 +no_defs'
        # Projection object (sub-classing Proj directly doesn't work).
        self.P = Proj(projstr)

    def __call__(self, lon, lat):
        return self.P(lon, lat)

    def to_xy(self, lon, lat):
        """Forward transform: lon/lat -> projected x/y."""
        return self.P(lon, lat)

    def to_ll(self, x, y):
        """Inverse transform: projected x/y -> lon/lat."""
        return self.P(x, y, inverse=True)
################
#### Testing ###
#utmzone = 51
#isnorth = False
#projstr = 'merc'
#P = MyProj(projstr, utmzone=utmzone, isnorth=isnorth)
#
#print P([124.,124.1],[-12.,-12.1])
#
|
from .settings import *
import dj_database_url
# Development settings overlay: enables DEBUG and the Django Debug Toolbar.
DEBUG = True

INSTALLED_APPS = INSTALLED_APPS + (
    'debug_toolbar',
)

SITE_URL = "http://192.168.1.101:8000"
FROM_EMAIL = "paul@pluscom.co.ke"

# Email admins on ERROR for django.request — only active when DEBUG is False
# (the require_debug_false filter), so this is inert in this dev overlay.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

# Hosts allowed to see the debug toolbar.
INTERNAL_IPS = ('127.0.0.1', 'localhost','192.168.1.101')
DEBUG_TOOLBAR_PATCH_SETTINGS = False
DEBUG_TOOLBAR_PANELS = [
    'debug_toolbar.panels.versions.VersionsPanel',
    'debug_toolbar.panels.timer.TimerPanel',
    'debug_toolbar.panels.settings.SettingsPanel',
    'debug_toolbar.panels.headers.HeadersPanel',
    'debug_toolbar.panels.request.RequestPanel',
    'debug_toolbar.panels.sql.SQLPanel',
    'debug_toolbar.panels.staticfiles.StaticFilesPanel',
    'debug_toolbar.panels.templates.TemplatesPanel',
    'debug_toolbar.panels.cache.CachePanel',
    'debug_toolbar.panels.signals.SignalsPanel',
    'debug_toolbar.panels.logging.LoggingPanel',
    'debug_toolbar.panels.redirects.RedirectsPanel',
]

MIDDLEWARE = MIDDLEWARE + [
    # ...
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    # ...
]

# Prefer DATABASE_URL (e.g. on a PaaS); fall back to a local MySQL instance.
if 'DATABASE_URL' in os.environ:
    DATABASES = {'default': dj_database_url.config()}
else:
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.mysql',
            'NAME': 'mfi',
            'USER': 'ishma',
            # NOTE(review): hard-coded DB credentials committed to source —
            # move to environment variables / secrets management.
            'PASSWORD': 'ishma59',
            'HOST': '127.0.0.1',
            'PORT': '',
        }
    }

SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')

STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/static/'
# NOTE(review): 'whitenoise.django.GzipManifestStaticFilesStorage' is the
# legacy (pre-4.0) WhiteNoise path — confirm against the installed version.
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
|
# Flatten the His68Leufs sequence file into a single line in all.txt:
# surrounding spaces are trimmed from each line and newlines dropped.
# Fixes vs original: the output handle was never closed/flushed, and the
# unused `all` (shadowing the builtin) and `counter` variables are removed.
with open('../sequences/His68Leufs.txt', 'r') as src, \
        open('../sequences/all.txt', 'w') as dst:
    for line in src:
        # Two-step strip preserved on purpose: a space immediately before
        # the newline survives strip(' ') and is kept in the output.
        dst.write(line.strip(' ').strip('\n'))
import logging
import json
import threading
import os
import paho.mqtt.client as mqtt
import base64
from time import sleep, time
from random import random
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
                    level=logging.INFO)
logger = logging.getLogger(__name__)

# Persistent state and configuration file names (JSON, working directory).
IDS_JSON_FILE = 'ids.json'
CONFIG_JSON_FILE = 'config.json'
# Maximum Telegram live-location lifetime used when sending a location.
MAX_LIVE_PERIOD = 86400  # 24 hours
# Expected uplink length.
PAYLOAD_BYTES = 9  # <Latitude: 4 bytes><Longitude: 4 bytes><SOC: 1 byte>
# Cap on the per-user location log file length (lines).
MAX_LOG_LINES = 7200
class Telegram_Backend():
    """Telegram front-end for the LoRa tracker.

    Loads bot configuration from CONFIG_JSON_FILE and per-user chat state
    from IDS_JSON_FILE, registers /start, /add_user and /rm_user handlers,
    and starts polling. Exposes helpers to push live locations and battery
    state-of-charge messages to users.
    """

    def __init__(self):
        # Default (empty) configuration; replaced by CONFIG_JSON_FILE contents.
        self.config = {"token": None, "users": [], "APPID": None, "PSW": None}
        self.last_soc = 0
        # Load config
        if os.path.isfile(CONFIG_JSON_FILE):
            with open(CONFIG_JSON_FILE, 'r') as file:
                self.config = json.load(file)
        else:
            logger.error(
                "Config file not found, created empty config. Fill it.")
            with open(CONFIG_JSON_FILE, 'w') as file:
                json.dump(self.config, file)
            exit()
        # Template for all chat IDs
        self.id_template = {"chat_id": None, "msg_id": None,
                            "bat_id": None, "tracker_id": None}
        # BUGFIX: copy the template per user. The original bound the SAME
        # dict object to every user, so updating one user's ids silently
        # mutated all users (and the template itself).
        chat_ids = {u: dict(self.id_template) for u in self.config["users"]}
        # Load chat ids
        if os.path.isfile(IDS_JSON_FILE):
            with open(IDS_JSON_FILE, 'r') as file:
                self.chat_ids = json.load(file)
            # Check if we added any users
            for user in chat_ids.keys():
                if not user in self.chat_ids.keys():
                    # BUGFIX: per-user copy instead of a shared reference.
                    self.chat_ids[user] = dict(self.id_template)
                    logger.info('Added user {}'.format(user))
            # Check if we removed any users
            users_to_delete = list()
            for user in self.chat_ids.keys():
                if not user in chat_ids.keys():
                    users_to_delete.append(user)
            for user in users_to_delete:
                del self.chat_ids[user]
                logger.info('Removed user {}'.format(user))
            # Dump chat ids
            with open(IDS_JSON_FILE, 'w') as file:
                json.dump(self.chat_ids, file)
        else:
            self.chat_ids = chat_ids
        # Create the Updater and pass it your bot's token.
        self.updater = Updater(self.config["token"], use_context=True)
        # Bot
        self.bot = self.updater.bot
        # Get the dispatcher to register handlers
        dp = self.updater.dispatcher
        # on different commands - answer in Telegram
        dp.add_handler(CommandHandler("start", self.start_command))
        dp.add_handler(CommandHandler("add_user", self.add_user))
        dp.add_handler(CommandHandler("rm_user", self.rm_user))
        # Start polling, non-blocking!
        self.updater.start_polling()

    def start_command(self, update, context):
        """Handle /start <tracker_id>: (re)register the caller's session."""
        # Extract user id and username
        chat_id = update.message.chat.id
        username = update.message.chat.username
        msg = update.message.text.split(" ")
        # User not allowed
        if not username in self.config["users"]:
            update.message.reply_text(
                'Unknown user, please contact maintainer')
            return
        if not len(msg) == 2:
            update.message.reply_text(
                'Please specify your tracker ID with /start <ID>')
            return
        # Tracker ID
        tracker_id = msg[1]
        # Reset this user's state for a fresh session.
        self.chat_ids[username]["chat_id"] = chat_id
        self.chat_ids[username]["msg_id"] = None
        self.chat_ids[username]["bat_id"] = None
        self.chat_ids[username]["tracker_id"] = tracker_id
        # Dump chat ids
        with open(IDS_JSON_FILE, 'w') as file:
            json.dump(self.chat_ids, file)
        # Success message
        update.message.reply_text(
            'Started LoRa_Tracker application for user {} with tracker ID {}'.format(username, tracker_id))

    def add_user(self, update, context):
        """Handle /add_user <name>; only the first configured user may call."""
        # Check if user is allowed to use it
        username = update.message.chat.username
        if not username == self.config["users"][0]:
            logger.warning("User {} cannot add users".format(username))
            return
        # Find command argument
        command_msg = update.message.text.split(" ")
        del command_msg[0]
        if not len(command_msg) == 1:
            logger.warning("Add user cmd wrong format")
            return
        # Append to allowed users
        new_user = command_msg[0]
        if not new_user in self.config["users"]:
            self.config["users"].append(new_user)
            # BUGFIX: per-user copy instead of a shared reference.
            self.chat_ids[new_user] = dict(self.id_template)
            logger.info("Added user {}".format(new_user))
            # Write it
            with open(CONFIG_JSON_FILE, 'w') as file:
                json.dump(self.config, file)

    def rm_user(self, update, context):
        """Handle /rm_user <name>; only the first configured user may call."""
        # Check if user is allowed to use it
        username = update.message.chat.username
        if not username == self.config["users"][0]:
            logger.warning("User {} cannot remove users".format(username))
            return
        # Find command argument
        command_msg = update.message.text.split(" ")
        del command_msg[0]
        if not len(command_msg) == 1:
            logger.warning("Remove user cmd wrong format")
            return
        # Delete from allowed users
        rm_user = command_msg[0]
        if rm_user in self.config["users"]:
            self.config["users"].remove(rm_user)
            logger.info("Removed user {}".format(rm_user))
            # Write it
            with open(CONFIG_JSON_FILE, 'w') as file:
                json.dump(self.config, file)
        # Delete chat ids for this user
        if rm_user in self.chat_ids.keys():
            del self.chat_ids[rm_user]
            with open(IDS_JSON_FILE, 'w') as file:
                json.dump(self.chat_ids, file)

    def send_live_location(self, username, lat, lon):
        """Send (or update) a live GPS position for the given user."""
        chat_id = self.chat_ids[username]["chat_id"]
        if chat_id is None:
            return
        msg_id = self.chat_ids[username]["msg_id"]
        # If there is already an active live location we just edit it
        if not msg_id is None:
            try:
                ret = self.bot.editMessageLiveLocation(
                    chat_id, msg_id, latitude=lat, longitude=lon, disable_notification=True)
            except Exception as e:
                logger.warning('Could not edit message: {}'.format(e))
                ret = True
            # Returns true on failure
            if not ret is True:
                return
            # Delete the previous msg, in order to keep chat clean
            try:
                self.bot.delete_message(chat_id, msg_id)
            except Exception as e:
                logger.warning('Could not delete message: {}'.format(e))
        # Either live location is not valid or we did not have one active
        logger.info('Could not edit live location for {}'.format(username))
        m = self.bot.send_location(chat_id, latitude=lat, longitude=lon,
                                   live_period=MAX_LIVE_PERIOD, disable_notification=True)
        self.chat_ids[username]["msg_id"] = m.message_id
        # Dump chat ids
        with open(IDS_JSON_FILE, 'w') as file:
            json.dump(self.chat_ids, file)

    def send_soc(self, username, soc):
        """Send (or update) the battery state-of-charge message for a user."""
        chat_id = self.chat_ids[username]["chat_id"]
        soc_msg = 'Bat: {}%'.format(soc)
        if chat_id is None:
            return
        bat_id = self.chat_ids[username]["bat_id"]
        # If there is already an active battery message we just edit it
        if not bat_id is None:
            # If SOC is same we do not edit
            if soc == self.last_soc:
                return
            try:
                ret = self.bot.edit_message_text(
                    soc_msg, chat_id, bat_id)
            except Exception as e:
                logger.warning('Could not edit message: {}'.format(e))
                ret = True
            # Update last SOC
            self.last_soc = soc
            # Returns true on failure
            if not ret is True:
                return
            # Delete the previous msg, in order to keep chat clean
            try:
                self.bot.delete_message(chat_id, bat_id)
            except Exception as e:
                logger.warning('Could not delete message: {}'.format(e))
        # Either the edit failed or we did not have an active battery message
        logger.info('Could not edit battery message for {}'.format(username))
        m = self.bot.send_message(self.chat_ids[username]["chat_id"], soc_msg)
        # Update last SOC
        self.last_soc = soc
        self.chat_ids[username]["bat_id"] = m.message_id
        # Dump chat ids
        with open(IDS_JSON_FILE, 'w') as file:
            json.dump(self.chat_ids, file)
class MQTT_TTN():
    """MQTT client that receives TTN uplinks and forwards them to Telegram."""

    def __init__(self):
        # The Telegram backend supplies both credentials and the user table.
        self.tb = Telegram_Backend()
        app_id = self.tb.config["APPID"]
        password = self.tb.config["PSW"]
        # MQTT client wired to our connect/message callbacks.
        self.mqttc = mqtt.Client()
        self.mqttc.on_connect = self.on_connect
        self.mqttc.on_message = self.on_message
        self.mqttc.username_pw_set(app_id, password)

    def start(self):
        """Connect to the TTN broker and service the network loop forever."""
        self.mqttc.connect("eu.thethings.network", 1883, 60)
        while True:
            self.mqttc.loop()

    def on_connect(self, mqttc, mosq, obj, rc):
        """Subscribe to all device uplinks once the broker accepts us."""
        logger.info('Connected with result code: {}'.format(rc))
        if not rc == 0:
            logger.error("ERROR: Could not connect to TTN")
            return
        # subscribe for all devices of user
        mqttc.subscribe('+/devices/+/up')

    def on_message(self, mqttc, obj, msg):
        """Decode a base64 uplink and hand size-valid payloads to the parser."""
        payload_json = json.loads(msg.payload)
        raw = base64.b64decode(payload_json['payload_raw'])
        dev_id = payload_json['dev_id']
        logger.info('Received {} bytes'.format(len(raw)))
        # Check length of msg
        if not len(raw) == PAYLOAD_BYTES:
            logger.error('Invalid payload size: {}'.format(len(raw)))
            return
        # Otherwise parse the msg
        self.parse_payload(raw, dev_id)

    def parse_payload(self, msg, dev_id):
        """Unpack <lat:4><lon:4><soc:1>, notify matching users, log position."""
        # Coordinates are big-endian signed 32-bit values scaled by 1e7.
        raw_lat = 0
        for byte in msg[0:4]:
            raw_lat = (raw_lat << 8) | byte
        lat = self.twos_comp(raw_lat, 32) / 10**7
        raw_lon = 0
        for byte in msg[4:8]:
            raw_lon = (raw_lon << 8) | byte
        lon = self.twos_comp(raw_lon, 32) / 10**7
        # SOC in %
        soc = round(msg[8]/255*100.0, 2)
        for user in self.tb.chat_ids.keys():
            if self.tb.chat_ids[user]['tracker_id'] == dev_id:
                # Send the location and battery level.
                self.tb.send_live_location(user, lat, lon)
                self.tb.send_soc(user, soc)
                # Append to the per-user log, trimming to MAX_LOG_LINES.
                lines = list()
                log_file = '{}.log'.format(user)
                if os.path.isfile(log_file):
                    with open(log_file, 'r') as l:
                        lines = l.readlines()
                    # Limit file length
                    if len(lines) > MAX_LOG_LINES:
                        del lines[0:len(lines)-MAX_LOG_LINES]
                with open(log_file, 'w') as l:
                    for line in lines:
                        l.write(line)
                    l.write('{}: {}, {}, {}\n'.format(time(), lat, lon, soc))
                    l.flush()

    def twos_comp(self, val, bits):
        """compute the 2's complement of int value val"""
        if (val & (1 << (bits - 1))) != 0:
            val = val - (1 << bits)
        return val
def main():
    """Run the MQTT→Telegram bridge forever, restarting after any failure."""
    bridge = MQTT_TTN()
    while True:
        try:
            # Blocking call; the surrounding loop restarts it on error.
            bridge.start()
        except Exception:
            logger.error('MQTT thread failed, restarting')
# Test payloads
# lat = {"0": 47.399978, "1": 40.749806}
# lon = {"0": 8.546835, "1": -73.987806}
# Payload 1
# 1C40A9A4 051824BE 64
# Payload 2
# 1849ED4C D3E65B54 32
# Script entry point: start the bridge.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# Time : 2021/12/22 22:04
# Author : QIN2DIM
# Github : https://github.com/QIN2DIM
# Description:
from services.cluster import __entropy__, __pending__
from services.middleware.workers_io import EntropyHeap
from services.settings import logger
from services.utils import CoroutineSpeedup
from services.utils import ToolBox
def update():
    """Push the local entropy task queue to the remote middleware heap."""
    EntropyHeap().update(local_entropy=__entropy__)
    report = ToolBox.runtime_report(
        motive="UPDATE",
        action_name="ScaffoldEntropy",
        message="update remote tasks queue."
    )
    logger.success(report)
def preview(remote: bool = False):
    """Print a console summary of the entropy queue.

    :param remote: when True preview the remote queue from the middleware,
        otherwise preview the local queue.
    """
    # Digest to display: <localQueue> or <remoteQueue>
    check_entropy = __entropy__ if not remote else EntropyHeap().sync()
    # Print to the console only when the digest is non-empty
    if check_entropy:
        # Idiom fix: len(...) instead of .__len__(); hoisted out of the loop.
        total = len(check_entropy)
        for i, atomic in enumerate(check_entropy):
            print(f">>> [{i + 1}/{total}]{atomic['name']}")
            print(f"注入地址: {atomic['register_url']}")
            print(f"生命周期: {atomic.get('life_cycle', 24)}小时")
            print(f"超级参数: {atomic.get('hyper_params')}\n")
        logger.success(ToolBox.runtime_report(
            motive="PREVIEW",
            action_name="ScaffoldEntropy",
        ))
    else:
        logger.warning(ToolBox.runtime_report(
            motive="PREVIEW",
            action_name="ScaffoldEntropy",
            message="empty entropy."
        ))
def check(power: int = None):
    """Concurrently probe the liveness of every registered entropy URL.

    :param power: worker count; defaults to the queue length, negative
        values are clamped to their absolute value.
    """
    class EntropyChecker(CoroutineSpeedup):
        # Worker pool that applies `utils` (a status checker) to each URL.
        def __init__(self, docker, utils):
            super(EntropyChecker, self).__init__(docker=docker)
            self.control_driver = utils

    mirror_entropy = __entropy__ + __pending__
    # Idiom fix: len(...) instead of .__len__().
    power = len(mirror_entropy) if not isinstance(power, int) else abs(power)
    # Deduplicate registration URLs before probing.
    urls = list({atomic["register_url"] for atomic in mirror_entropy.copy()})
    sug = EntropyChecker(docker=urls, utils=ToolBox.check_html_status)
    sug.go(power=power, action_name="ScaffoldEntropy")
    logger.success(ToolBox.runtime_report(
        motive="CHECK",
        action_name="ScaffoldEntropy",
    ))
|
import app_config
import copy
from fabric.api import *
from fabric.state import env
from jinja2 import Template
import logging
import carebot
"""
General configuration
"""
env.user = app_config.SERVER_USER
env.hosts = app_config.SERVERS
env.slug = app_config.PROJECT_SLUG

logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# NOTE(review): env.user is reassigned (same value) and env.hosts is reset
# to [] here, overriding the assignments above; the production()/staging()
# tasks repopulate hosts later. Confirm the first block is intentional.
env.user = app_config.SERVER_USER
env.forward_agent = True
env.hosts = []
env.settings = None
"""
Configuration
"""
def _get_template_conf_path(service, extension):
"""
Derive the path for a conf template file.
"""
return 'confs/%s.%s' % (service, extension)
def _get_rendered_conf_path(service, extension):
    """
    Derive the rendered path for a conf file.
    """
    # e.g. confs/rendered/<project>.<service>.<extension>
    return 'confs/rendered/%s.%s.%s' % (app_config.PROJECT_FILENAME, service, extension)


def _get_installed_conf_path(service, remote_path, extension):
    """
    Derive the installed path for a conf file.
    """
    # NOTE(review): `remote_path` is accepted but unused — the target is
    # always /etc/init. Confirm this is intentional for non-upstart confs.
    return '/etc/init/%s.%s.%s' % (app_config.PROJECT_FILENAME, service, extension)


def _get_installed_service_name(service):
    """
    Derive the init service name for an installed service.
    """
    return '%s.%s' % (app_config.PROJECT_FILENAME, service)
"""
Running the app
Probably only neded the first time, to set up oauth creds
"""
@task
def app(port='8000'):
    """
    Serve app.py.
    """
    # With a deployment target configured, run gunicorn under that target's
    # environment; otherwise serve directly with local settings.
    if env.settings:
        local("DEPLOYMENT_TARGET=%s bash -c 'gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload app:wsgi_app'" % (env.settings, port))
    else:
        local('gunicorn -b 0.0.0.0:%s --timeout 3600 --debug --reload app:wsgi_app' % port)
"""
Environments
"""
@task
def production():
    """
    Run as though on production.
    """
    # Select the deployment target; host list comes from app_config.
    env.settings = 'production'
    env.branch = 'master'
    app_config.configure_targets(env.settings)
    env.hosts = app_config.SERVERS


@task
def staging():
    """
    Run as though on staging.
    """
    # Same as production() but with the staging target selected.
    env.settings = 'staging'
    env.branch = 'master'
    app_config.configure_targets(env.settings)
    env.hosts = app_config.SERVERS
"""
Branches
"""
@task
def branch(branch_name):
    """
    Work on any specified branch.
    """
    env.branch = branch_name


@task
def create_directories():
    """
    Create server directories.
    """
    require('settings', provided_by=['production', 'staging'])
    run('mkdir -p %(SERVER_PROJECT_PATH)s' % app_config.__dict__)
    # NOTE(review): cloning into SERVER_PROJECT_PATH here overlaps with
    # clone_repo() (which clones into SERVER_REPOSITORY_PATH) — confirm
    # both are needed.
    run('git clone %(REPOSITORY_URL)s %(SERVER_PROJECT_PATH)s' % app_config.__dict__)


@task
def setup_logs():
    """
    Create log directories.
    """
    require('settings', provided_by=['production', 'staging'])
    sudo('mkdir %(SERVER_LOG_PATH)s' % app_config.__dict__)
    sudo('chown ubuntu:ubuntu %(SERVER_LOG_PATH)s' % app_config.__dict__)


@task
def create_virtualenv():
    """
    Setup a server virtualenv.
    """
    require('settings', provided_by=['production', 'staging'])
    run('virtualenv -p %(SERVER_PYTHON)s %(SERVER_VIRTUALENV_PATH)s' % app_config.__dict__)
    # NOTE(review): `source activate` in its own run() has no lasting
    # effect — each run() spawns a fresh shell. Confirm this line is needed.
    run('source %(SERVER_VIRTUALENV_PATH)s/bin/activate' % app_config.__dict__)


@task
def clone_repo():
    """
    Clone the source repository.
    """
    require('settings', provided_by=['production', 'staging'])
    run('git clone %(REPOSITORY_URL)s %(SERVER_REPOSITORY_PATH)s' % app_config.__dict__)
    if app_config.REPOSITORY_ALT_URL:
        run('git remote add bitbucket %(REPOSITORY_ALT_URL)s' % app_config.__dict__)


"""
fab migration:name='201603241327_add_team'
"""
@task
def migration(name):
    # Run a single migration script on the server, e.g.
    # fab migration:name='201603241327_add_team'
    run('source %(SERVER_VIRTUALENV_PATH)s/bin/activate' % app_config.__dict__)
    run('python %s/migrations/%s.py' % (app_config.__dict__['SERVER_PROJECT_PATH'], name))
@task
def checkout_latest(remote='origin'):
    """
    Get the updated code
    """
    run('cd %s; git fetch %s' % (app_config.SERVER_PROJECT_PATH, remote))
    run('cd %s; git checkout %s; git pull %s %s' % (app_config.SERVER_PROJECT_PATH, env.branch, remote, env.branch))


@task
def install_requirements():
    """
    Install the latest requirements.
    """
    require('settings', provided_by=['production', 'staging'])
    run('%(SERVER_VIRTUALENV_PATH)s/bin/pip install -U -r %(SERVER_PROJECT_PATH)s/requirements.txt' % app_config.__dict__)
    # run('cd %(SERVER_REPOSITORY_PATH)s; npm install' % app_config.__dict__)


@task
def render_confs():
    """
    Renders server configurations.
    """
    require('settings', provided_by=['production', 'staging'])
    # Recreate the rendered confs directory from scratch; warn_only so a
    # missing directory on the first run does not abort the task.
    with settings(warn_only=True):
        local('rm -rf confs/rendered')
        local('mkdir confs/rendered')
    # Copy the app_config so that when we load the secrets they don't
    # get exposed to other management commands
    context = copy.copy(app_config.__dict__)
    context.update(app_config.get_secrets())
    # Render each service's conf template with the merged context.
    for service, remote_path, extension in app_config.SERVER_SERVICES:
        template_path = _get_template_conf_path(service, extension)
        rendered_path = _get_rendered_conf_path(service, extension)
        with open(template_path, 'r') as read_template:
            with open(rendered_path, 'wb') as write_template:
                payload = Template(read_template.read())
                write_template.write(payload.render(**context))
@task
def deploy_confs():
    """
    Deploys rendered server configurations to the specified server.
    This will reload nginx and the appropriate uwsgi config.
    """
    require('settings', provided_by=['production', 'staging'])
    put('%s.env' % env.settings, '%(SERVER_PROJECT_PATH)s/.env' % app_config.__dict__)
    # TODO -- we might want to run `source .env`?
    render_confs()
    with settings(warn_only=True):
        for service, remote_path, extension in app_config.SERVER_SERVICES:
            rendered_path = _get_rendered_conf_path(service, extension)
            installed_path = _get_installed_conf_path(service, remote_path, extension)
            # Python 2 print statement — this fabfile targets Python 2.
            print 'Updating %s' % installed_path
            put(rendered_path, installed_path, use_sudo=True)
            # Tell upstart to pick up the new/changed conf.
            sudo('initctl reload-configuration')
            if service == 'nginx':
                sudo('service nginx reload')
            else:
                service_name = _get_installed_service_name(service)
                sudo('service %s restart' % service_name)


@task
def deploy_analytics_conf():
    # Move google ouath credentials
    local('cp ~/.google_oauth_credentials ./.google_oauth_credentials')
    put('.google_oauth_credentials', '~/.google_oauth_credentials')
    put('.google_oauth_credentials', '/root/.google_oauth_credentials', use_sudo=True)
    # run('mkdir -p %(SERVER_PROJECT_PATH)s' % app_config.__dict__)
@task
def install_crontab():
    """
    Install cron jobs script into cron.d.
    """
    require('settings', provided_by=['production', 'staging'])
    # Copy the project crontab into /etc/cron.d under the project's name.
    cron_cmd = 'cp %(SERVER_PROJECT_PATH)s/crontab /etc/cron.d/%(PROJECT_FILENAME)s'
    sudo(cron_cmd % app_config.__dict__)
@task
def setup_database():
    """
    Manually create an empty sqlite DB.

    Otherwise it gets created by root on first run, and regular
    users can't write to it.
    """
    config = app_config.__dict__
    db_path = '%(SERVER_PROJECT_PATH)s/%(PROJECT_FILENAME)s.db' % config
    # Touch the DB via sqlite3, then hand ownership to the app user.
    sudo('sqlite3 %s ".databases"' % db_path)
    sudo('chown ubuntu:ubuntu %s' % db_path)
@task
def start_service(service):
    """
    Start a service on the server.
    """
    sudo('service %s start' % _get_installed_service_name(service))
@task
def stop_service(service):
    """
    Stop a service on the server.
    """
    sudo('service %s stop' % _get_installed_service_name(service))
@task
def setup():
    """
    Provision a fresh server end-to-end.

    Runs every bootstrap step in dependency order: logs, directories,
    virtualenv, code checkout, database, requirements, confs and cron.
    """
    require('settings', provided_by=['production', 'staging'])
    setup_logs()
    create_directories()
    create_virtualenv()
    clone_repo()
    checkout_latest()
    setup_database()
    install_requirements()
    deploy_analytics_conf()
    deploy_confs()
    install_crontab()
@task
def reboot():
    """
    Restart the server.

    Not yet implemented; this task exists as a placeholder so that
    `fab reboot` appears in the task list.
    """
    # Fixed: the body was a bare `None` expression statement; `pass` is the
    # idiomatic no-op for a stub. Behavior is unchanged.
    pass
@task
def deploy():
    """
    Deploy the latest code and configuration to the server.
    """
    require('settings', provided_by=['production', 'staging'])
    with settings(warn_only=True):
        # warn_only: don't abort the deploy if the bot wasn't running.
        # NOTE(review): assumed scope -- only stop_service is inside this
        # context; deploy_confs() restarts each configured service afterwards.
        stop_service('bot')
    checkout_latest()
    install_requirements()
    deploy_analytics_conf()
    render_confs()
    deploy_confs()
    install_crontab()
"""
Deaths, destroyers of worlds
"""
@task
def shiva_the_destroyer():
    """
    Remove all directories, databases, etc. associated with the application.
    """
    with settings(warn_only=True):
        run('rm -Rf %(SERVER_PROJECT_PATH)s' % app_config.__dict__)
        run('rm -Rf %(SERVER_VIRTUALENV_PATH)s' % app_config.__dict__)
        sudo('rm -Rf %(SERVER_LOG_PATH)s' % app_config.__dict__)
        # Remove any installed services
        stop_service('bot')
        # BUG FIX: `service`, `remote_path` and `extension` were referenced
        # here without ever being defined (NameError at runtime). Iterate
        # over the configured services, mirroring deploy_confs().
        for service, remote_path, extension in app_config.SERVER_SERVICES:
            installed_service_path = _get_installed_conf_path(service, remote_path, extension)
            sudo('rm -f %s' % installed_service_path)
|
import json
import unittest
from parameterized import parameterized
from repro.models.liu2019 import BertSumExt, BertSumExtAbs, TransformerAbs
from repro.testing import get_testing_device_parameters
from . import FIXTURES_ROOT
class TestLiu2019Models(unittest.TestCase):
    """Regression tests for the Liu & Lapata (2019) summarization models."""

    def setUp(self) -> None:
        # The example documents were taken from the PreSumm repo:
        # https://github.com/nlpyang/PreSumm/blob/dev/raw_data/temp.raw_src
        # FIX: use a context manager so the fixture file handle is closed;
        # json.load(open(...)) previously leaked the handle.
        with open(f"{FIXTURES_ROOT}/expected-output.json") as f:
            self.expected_output = json.load(f)

    @parameterized.expand(get_testing_device_parameters())
    def test_bert_sum_ext_regression(self, device: int):
        model = BertSumExt(device=device)
        # Test when the document is pre-sentence split
        document = self.expected_output["BertSumExt"]["document"]
        expected_summary = self.expected_output["BertSumExt"]["summary"]
        summary = model.predict(document)
        assert summary == expected_summary

    @parameterized.expand(get_testing_device_parameters())
    def test_bert_sum_ext_abs_cnndm_regression(self, device: int):
        model = BertSumExtAbs(device=device)
        document = self.expected_output["BertSumExtAbs_CNNDM"]["document"]
        expected_summary = self.expected_output["BertSumExtAbs_CNNDM"]["summary"]
        summary = model.predict(document)
        assert summary == expected_summary

    @parameterized.expand(get_testing_device_parameters())
    def test_bert_sum_ext_abs_xsum_regression(self, device: int):
        model = BertSumExtAbs(model="bertsumextabs_xsum.pt", device=device)
        document = self.expected_output["BertSumExtAbs_XSum"]["document"]
        expected_summary = self.expected_output["BertSumExtAbs_XSum"]["summary"]
        summary = model.predict(document)
        assert summary == expected_summary

    @parameterized.expand(get_testing_device_parameters())
    def test_transformer_abs_regression(self, device: int):
        model = TransformerAbs(device=device)
        document = self.expected_output["TransformerAbs"]["document"]
        expected_summary = self.expected_output["TransformerAbs"]["summary"]
        summary = model.predict(document)
        assert summary == expected_summary
|
import os
import tempfile
from pathlib import Path
import fsspec
import pytest
from callee.strings import StartsWith
from sgkit.io.vcf.utils import build_url, temporary_directory
def directory_with_file_scheme() -> str:
    """Return the system temp directory as a file:// URL."""
    return "file://" + tempfile.gettempdir()
def directory_with_missing_parent() -> str:
    """Return a fresh local path that is guaranteed not to exist."""
    # Create a real temp directory, then remove it immediately; the
    # returned path therefore has no existing directory behind it.
    path = tempfile.mkdtemp()
    os.rmdir(path)
    assert not Path(path).exists()
    return path
@pytest.mark.parametrize(
    "dir",
    [None, directory_with_file_scheme(), directory_with_missing_parent()],
)
def test_temporary_directory(dir):
    """temporary_directory yields a usable directory and removes it on exit."""
    prefix = "prefix-"
    suffix = "-suffix"
    with temporary_directory(suffix=suffix, prefix=prefix, dir=dir) as tmpdir:
        # Strip the scheme so the directory can be inspected locally.
        if tmpdir.startswith("file:///"):
            tmpdir = tmpdir[len("file://"):]
        created = Path(tmpdir)
        assert created.exists()
        assert created.name.startswith(prefix)
        assert created.name.endswith(suffix)
        (created / "file.txt").write_text("Hello")
    # The directory (and its contents) must be gone after the context exits.
    assert not created.exists()
def test_temporary_directory__no_permission():
    """Creating a temp dir inside a read-only directory raises PermissionError."""
    with tempfile.TemporaryDirectory() as parent:
        os.chmod(parent, 0o444)  # read-only: no new entries can be created
        with pytest.raises(PermissionError):
            with temporary_directory(dir=parent):
                pass  # pragma: no cover
def test_non_local_filesystem(mocker):
    """temporary_directory routes mkdir/rm through the fsspec filesystem."""
    # Replace fsspec.filesystem with a factory returning a fake filesystem.
    fs_factory = mocker.patch("fsspec.filesystem")
    fake_fs = mocker.MagicMock()
    fs_factory.return_value = fake_fs

    with temporary_directory(
        prefix="mytmp", dir="myfs://path/file", storage_options=dict(a="b")
    ):
        pass

    # The filesystem must be created once with the scheme and storage options,
    # the directory created under the requested prefix, then removed.
    fs_factory.assert_called_once_with("myfs", a="b")
    fake_fs.mkdir.assert_called_once_with(StartsWith("myfs://path/file/mytmp"))
    fake_fs.rm.assert_called_once_with(
        StartsWith("myfs://path/file/mytmp"), recursive=True
    )
def test_build_url():
    """build_url joins a subpath onto a base URL, preserving query and fragment."""
    cases = [
        ("http://host/path", "http://host/path/subpath"),
        ("http://host/path/", "http://host/path/subpath"),
        ("http://host/path?a=b", "http://host/path/subpath?a=b"),
        ("http://host/path/?a=b", "http://host/path/subpath?a=b"),
        ("http://host/path#a", "http://host/path/subpath#a"),
        ("s3://host/path", "s3://host/path/subpath"),
        ("relative_path/path", "relative_path/path/subpath"),
        ("/absolute_path/path", "/absolute_path/path/subpath"),
        ("http://host/a%20path", "http://host/a%20path/subpath"),
        ("http://host/a path", "http://host/a%20path/subpath"),
    ]
    for base, expected in cases:
        assert build_url(base, "subpath") == expected
|
"""
Test version of a classic optimised Dijkstra algorithm for single source shortest path
"""
import networkx as nx
#from skfmm import heap
#import heapq
import heapdict
#from fibheap import *
import fibonacci_heap_mod
class DijkstraSSSP:
    """
    Test implementation of Dijkstra single-source shortest paths (SSSP) plus
    the Karger/Koller/Phillips all-pairs shortest paths (APSP) algorithm,
    comparing heap strategies (heapdict vs. a Fibonacci heap with
    decrease-key support).
    """
    def __init__(self,G):
        # G: networkx graph; every edge must carry a 'weight' attribute.
        self.graph = G
        self.d = {}  # shortest-distance estimates
        self.p = {}  # contains the best path from u to v
        self.Q = heapdict.heapdict()  # priority queue keyed on distance
        self.fibmap = {}  # contains lookup between u,v and Entry in the fib heap for fibonacci_mod_heap
    ################################################################################
    """
    @param s source node in graph
    """
    def dijkstraSSSPTest(self,s):
        """Run Dijkstra from source s, filling self.d (distances) and self.p."""
        self.d={}
        self.d[s]=0
        self.p={}
        self.Q = heapdict.heapdict()
        self.Q[s] = self.d[s]
        # Initialise every other node to "infinity" with no predecessor.
        # NOTE(review): p[s] is never set; relax() would KeyError on the
        # source if weights were negative -- assumes non-negative weights.
        for v in self.graph.nodes:
            if v!=s:
                self.d[v]=99999999999 #todo: max float?
                self.p[v]=""
        while len(self.Q)>0:
            u, priority = self.Q.popitem()
            for e in nx.neighbors(self.graph,u):
                self.relax([u,e,0]) #annoyingly, networkx stores edges as triples like this: [origin, dest, ???]
    ################################################################################
    def relax(self,e):
        """Relax edge e=[u,v,0]: update d[v] and the queue if going via u is shorter."""
        #get the two edge ends
        #e=[u,v,0] triple to match networkx edge keys
        #print("relax: ",e)
        u=e[0]
        v=e[1]
        w=self.graph.edges[e]['weight']
        #
        if self.d[u]+w<self.d[v]:
            self.d[v]=self.d[u]+w #check - is the right, was c(e), not c(u,v) - it's the same
            if self.p[v]=="":
                self.Q[v]=self.d[v] #This is pushing a new element, v
            else:
                # heapdict supports decrease-key via plain assignment, so both
                # branches are the same operation here.
                self.Q[v]=self.d[v] #This is a problem, you need a dict heap that lets you modify the priority "decrease-key" and can rebuild the heap to maintain the minimum head
            self.p[v]=u #what are we setting here - this just needs to be a visited flag to prevent cycles?
    ################################################################################
    """
    Prints the results of running dijkstraSSSP i.e. the cost of getting to every node in the graph from the source node
    """
    def debugPrintData(self):
        for v,w in self.d.items():
            if v in self.p:
                pv=self.p[v]
            else:
                pv="n/a"
            print(v,"-->",w, "p[v]=",pv)
    ################################################################################
    """
    Prints the results of running KKP_APSP i.e. the cost of getting to every node in the graph from every other node
    All the d and p hashes are two level now as we've got multiple sources
    """
    def debugPrintAPSPData(self):
        for s in self.d:
            for v,w in self.d[s].items():
                if v in self.p[s]:
                    pv=self.p[s][v]
                else:
                    pv="n/a"
                print(s, "-->", v, "-->" , w , "p[s][v]=",pv)
    ################################################################################
    """
    This is an implementation of the Karger, Koller and Phillips algorithm for APSP using a modification of Dijkstra to speed up finding all the pairs of
    shortest paths simultaneously.
    D.R. Karger, D. Koller, and S.J. Phillips. Finding the hidden path: time bounds for all-pairs shortest paths. SIAM Journal on Computing, 22:1199-1217, 1993.
    """
    def KKP_APSP(self):
        self.d={} #this is going to be d[u,v] distances
        self.p={} #this is going to be p[u,v] node names
        vin={} #vertex in
        vout={} #vertex out
        # First pass: per-source containers and zero self-distances.
        for u in self.graph.nodes:
            self.d[u]={} #need to init the hash of hash
            self.d[u][u]=0
            self.p[u]={} #need to init this too
            self.fibmap[u]={} #and this
            vin[u]=[]
            vout[u]=[]
        #endfor
        # Second pass: every ordered pair starts at "infinity".
        for u in self.graph.nodes:
            for v in self.graph.nodes:
                if u!=v:
                    if not u in self.d:
                        self.d[u]={}
                        self.p[u]={}
                    #endif
                    self.d[u][v]=99999999999999 #todo: max float
                    self.p[u][v]=""
                    self.fibmap[u][v]=None
                #endif
            #endfor
        #endfor
        #self.Q = heapdict.heapdict()
        #self.Q = makefheap()
        # Fibonacci heap: needed for a true O(1) amortised decrease-key.
        self.Q = fibonacci_heap_mod.Fibonacci_heap()
        # Seed the heap with every edge as a candidate (u,v) pair.
        for e in self.graph.edges:
            u=e[0]
            v=e[1]
            w=self.graph.edges[e]['weight']
            self.d[u][v]=w
            self.p[u][v]=e
            #self.Q[(u,v)] = self.d[u][v]
            #fheappush(self.Q, (self.d[u][v],(u,v)) )
            entry = self.Q.enqueue( (u,v), self.d[u][v] )
            self.fibmap[u][v]=entry
        #endfor
        # Main loop: settle pairs in increasing distance order.
        while len(self.Q)>0:
            #while self.Q.num_nodes>0:
            #edge, priority = self.Q.popitem()
            #priority, edge = fheappop(self.Q)
            entry = self.Q.dequeue_min()
            edge = entry.m_elem #and weight =m_priority
            #print("weight=",entry.m_priority)
            u = edge[0]
            v = edge[1]
            vin[v].append(u) #insert? TODO
            for e in vout[v]:
                self.KKP_Relax(u,e)
            #endfor
            if self.p[u][v][0]==u: #TODO start[p[u,v]]=u? i.e. start vertex of edge == u
                vout[u].append(self.p[u][v]) #TODO insert?
                for w in vin[u]:
                    self.KKP_Relax(w,self.p[u][v])
                #endfor
            #endif
        #endwhile
    ################################################################################
    """
    Edge relaxation for the Karger, Koller and Phillips algorithm
    """
    def KKP_Relax(self,u,e):
        # e is an edge (v, w); try to improve source u's distance to w via v.
        v=e[0]
        w=e[1]
        weight = self.graph.edges[e]['weight']
        if self.d[u][v]+weight < self.d[u][w]:
            self.d[u][w] = self.d[u][v]+weight
            if self.p[u][w] == "":
                #self.Q[(u,w)]=self.d[u][w] #heap-insert
                #fheappush(self.Q, (self.d[u][w], (u,w)) )
                entry = self.Q.enqueue( (u,w), self.d[u][w] )
                self.fibmap[u][w]=entry
            else:
                #self.Q[(u,w)]=self.d[u][w] #decrease-key
                entry = self.fibmap[u][w]
                self.Q.decrease_key(entry, self.d[u][w])
            self.p[u][w] = e
        #endif
    ################################################################################
|
from . import financialmodelingprep as fmp_api
|
#! /usr/bin/env python3
# Copyright(c) 2017 Intel Corporation.
# License: MIT See LICENSE file in root directory.
from mvnc import mvncapi as mvnc
from sys import argv
import numpy
import cv2
from os import listdir
from os.path import isfile, join
from random import choice
from timeit import timeit
from threading import Thread
from os import system
# Command line: network dir, picture dir, input width, input height.
if len(argv) != 5:
    print('Syntax: python3 pytest.py <network directory> <picture directory> <input img width> <input img height>')
    print('    <network directory> is the directory that contains graph, stat.txt and')
    print('    categories.txt')
    print('    <picture directory> is the directory with several JPEG or PNG images to process')
    quit()
img_width = int(argv[3])
img_height = int(argv[4])
#mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)
# *****************************************************************
# Get a list of devices
# *****************************************************************
devices = mvnc.EnumerateDevices()
if len(devices) == 0:
    print('No devices found')
    quit()
#print("\n\nFound ", len(devices), "devices :", devices, "\n\n")
devHandle = []    # one opened Device handle per stick
graphHandle = []  # one allocated Graph handle per stick
# *****************************************************************
# Read and preprocess image file(s)
# *****************************************************************
imgarr = []
onlyfiles = [f for f in listdir(argv[2]) if isfile(join(argv[2], f))]
# Cap the working set at 100 images.
onlyfiles = onlyfiles[:100]
for file in onlyfiles:
    fimg = argv[2] + "/" + file
    print("Opening file ", fimg)
    img = cv2.imread(fimg)
    # NOTE(review): cv2.imread returns None for unreadable files; the disabled
    # guard below would skip them -- confirm the input dir contains only images.
    #if img==None:
    #    continue
    img = cv2.resize(img, (img_width, img_height))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    # NCS expects half-precision float input.
    img = img.astype(numpy.float16)
    #print(file, img.shape)
    imgarr.append(img)
# *****************************************************************
# Read graph file
# *****************************************************************
graph_folder=argv[1]
# Compile the graph on first use (no-op when 'graph' already exists).
system("(cd " + graph_folder + "; test -f graph || make compile)")
#Load graph
with open(join(argv[1],'graph'), mode='rb') as f:
    graph = f.read()
# *****************************************************************
# Open the device and load the graph into each of the devices
# *****************************************************************
for devnum in range(len(devices)):
    #print("***********************************************")
    devHandle.append(mvnc.Device(devices[devnum]))
    devHandle[devnum].OpenDevice()
    opt = devHandle[devnum].GetDeviceOption(mvnc.DeviceOption.OPTIMISATION_LIST)
    #print("Optimisations:")
    #print(opt)
    graphHandle.append(devHandle[devnum].AllocateGraph(graph))
    graphHandle[devnum].SetGraphOption(mvnc.GraphOption.ITERATIONS, 1)
    iterations = graphHandle[devnum].GetGraphOption(mvnc.GraphOption.ITERATIONS)
    #print('Iterations:', iterations)
#print("***********************************************")
#print("Loaded Graphs")
#print("***********************************************\n\n\n")
def runparallel(count=100, num=[]):
    """Run `count` load/read inference cycles across the given device indices.

    :param count: number of iterations to run
    :param num: list of device indices to use; empty means all devices
    """
    targets = num if num else range(len(devices))
    for _ in range(count):
        # Queue one random input tensor on every target device...
        for devnum in targets:
            graphHandle[devnum].LoadTensor(choice(imgarr), 'user object')
        # ...then collect each device's result in the same order.
        for devnum in targets:
            tensor, userobj = graphHandle[devnum].GetResult()
def runthreaded(count=100,num=[]):
    """Run `runparallel` on each device index in its own thread and wait.

    :param count: iterations per device
    :param num: list of device indices to use; empty means all devices
    """
    indices = num if num else range(len(devices))
    workers = [Thread(target=runparallel, args=(count, [i],)) for i in indices]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == '__main__':
    # *****************************************************************
    # Runs and times runthreaded with 'i' sticks, until all sticks
    # run at once
    # *****************************************************************
    for i in range(1, len(devices)+1):
        # Build a literal list of device indices for the timeit statement.
        num = str(list(range(i)))
        tot_time = timeit("runthreaded(count=100,num="+num+")", setup="from __main__ import runthreaded", number=1)
        # 100 frames per stick per run -> frames per second.
        print("\n\nRunning " + argv[1] +" on "+str(i)+" sticks threaded : %0.2f FPS\n\n"%(100.0*i/tot_time))
    # *****************************************************************
    # Clean up and close devices
    # *****************************************************************
    for devnum in range(len(devices)):
        graphHandle[devnum].DeallocateGraph()
        devHandle[devnum].CloseDevice()
    #print('\n\nFinished\n\n')
|
# This source is based on:
# 'examples/vhdl/array_axis_vcs/run.py' from VUnit (Mozilla Public License, v. 2.0)
from pathlib import Path
from vunit import VUnit
# Build the VUnit test runner from command-line arguments.
VU = VUnit.from_argv()
VU.add_random()
VU.add_verification_components()

ROOT = Path(__file__).parent

# Compile the local VHDL sources plus the project's src/ tree into "lib".
lib = VU.add_library("lib")
lib.add_source_files([
    ROOT / "*.vhd",
    ROOT / ".." / ".." / "src" / "*.vhd",
])

VU.main()
|
#!/usr/bin/env python
# Si570Utils class gives python access to the Si570 Digital
# Sythesizer via a USB connection.
# Copyright (C) 2014 Martin Ewing
# Massive modifications by Bob Bouterse WD8RDE, I only need it to calculate register values.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact the author by e-mail: aa6e@arrl.net
# This is "middleware" that defines an API for general user programming
# of radio systems using the Si570 Programmable VCXO as an inexpensive
# digital VFO. These routines connect via USB to an ATtiny45 USB-to-I2C
# device, which is running the usbavrsi570 code from PE0FKO.
# Tested on SoftRock RxTx Ensemble that uses the ATtiny85 MPU chip and a
# SiLabs 570 ("CAC000141G / D1HOS144") chip
# [3.3v CMOS, 61 ppm stab., 10-160 MHz]
# partially based on a subset of operations.c from Andrew Nilsson VK6JBL
# Also, see http://www.silabs.com/Support%20Documents/TechnicalDocs/si570.pdf
# and https://code.google.com/p/usbavrsi570/
# require libusb-1.0 wrapper from https://pypi.python.org/pypi/libusb1/1.2.0
import math, sys
from sidefs import *
# Note changes from operation.c:
# 1. method names have changed to make them more regular.
# 2. get/set freq always work with floating MHz of signal frequency =
# osc frequency / multiplier.
class Si570Utils(object):
    """
    Register-value calculator for the Si570 programmable oscillator.
    (USB I/O has been stripped; only the divider/register math remains.)
    Python 2 code -- integer '/' division semantics are relied upon below.
    """
    def __init__(self, verbose=0, fXtal=SI570_NOMINAL_XTALL_FREQ, multiplier=4):
        # verbose: 0 = quiet, 1 = progress, >=2 also prints divider solutions
        self.verbose = verbose
        self.fXtal = fXtal            # crystal reference frequency
        self.multiplier = multiplier  # oscillator freq = multiplier * signal freq
    def __calcDividers(self, f): # Returns solution = [HS_DIV, N1, f0, RFREQ]
        # Instead of solution structure, use simple list for each variable.
        cHS_DIV = list()
        cN1 = list()
        cf0 = list()
        cRFREQ = list()
        for i in range(7,-1,-1): # Count down through the dividers
            if HS_DIV_MAP[i] > 0:
                cHS_DIV.append(i)
                # Aim the DCO at the middle of its valid range.
                y = (SI570_DCO_HIGH + SI570_DCO_LOW) / (2 * f)
                y = y / HS_DIV_MAP[i]
                # N1 must be 1 or an even number, capped at 128.
                if y < 1.5:
                    y = 1.0
                else:
                    y = 2 * round(y/2.0)
                if y > 128:
                    y = 128
                cN1.append( math.trunc(y) - 1 )
                cf0.append( f * y * HS_DIV_MAP[i] )
            else:
                cHS_DIV.append(None) # dummy result
                cN1.append(None) # another dummy
                cf0.append( 1.0E16 )
        # Pick the valid solution with the lowest DCO frequency.
        imin = -1
        fmin = 1.0E16
        for i in range(8):
            if (cf0[i] >= SI570_DCO_LOW) & (cf0[i] <= SI570_DCO_HIGH) :
                if cf0[i] < fmin:
                    fmin = cf0[i]
                    imin = i
        if imin >= 0:
            solution = [ cHS_DIV[imin], cN1[imin], cf0[imin], cf0[imin]/self.fXtal ]
            if (self.verbose >= 2):
                print "Solution:"
                print "  HS_DIV = %d" % solution[0]
                print "  N1 = %d" % solution[1]
                print "  f0 = %f" % solution[2]
                print "  RFREQ = %f" % solution[3]
        else:
            solution = None # This is the error return
        return solution
    def __setLongWord(self, v ): # v = int value; return bytearray(4)
        # Split a 32-bit value into 4 little-endian bytes.
        iv = int(v) # be sure of int type
        b = bytearray(4)
        b[0] = iv & 0xff
        b[1] = ((iv & 0xff00) >> 8) & 0xff
        b[2] = ((iv & 0xff0000) >> 16) & 0xff
        b[3] = ((iv & 0xff000000) >> 24) & 0xff
        return b # NB bytearray, not long word!
    def setFreq(self, frequency):
        # Compute the 6-byte register image for a signal frequency (MHz);
        # the oscillator itself runs at multiplier * frequency.
        f = self.multiplier * frequency
        if self.verbose:
            print "Setting Si570 Frequency by registers to: %f" % f
        sHS_DIV, sN1, sf0, sRFREQ = self.__calcDividers(f)
        # RFREQ is a 38-bit fixed-point value with 28 fractional bits (2**28).
        RFREQ_int = math.trunc(sRFREQ)
        RFREQ_frac= int( round((sRFREQ - RFREQ_int) * 268435456) ) # check int ok
        intbuf = self.__setLongWord( RFREQ_int )
        fracbuf = self.__setLongWord( RFREQ_frac)
        # Pack registers 7..12 per the Si570 datasheet layout.
        outbuf = bytearray(6)
        outbuf[5] = fracbuf[0]
        outbuf[4] = fracbuf[1]
        outbuf[3] = fracbuf[2]
        outbuf[2] = fracbuf[3] | ((intbuf[0] & 0xf) << 4)
        # NOTE(review): '/' here is Python 2 integer division -- intentional.
        outbuf[1] = RFREQ_int / 16 + ((sN1 & 0x3) << 6)
        outbuf[0] = sN1/4 + (sHS_DIV << 5)
        if self.verbose:
            print "Set Freq Buffer",
            print "%x %x %x %x %x %x" % (outbuf[0], outbuf[1], outbuf[2], outbuf[3], outbuf[4], outbuf[5])
        return outbuf
# End of Si570 class
if __name__ == "__main__":
    # debug code goes here -- smoke-test the register calculation at 7.5 MHz
    si = Si570Utils(verbose=4)
    print "SET FREQ BY VALUE"
    si.setFreq(7.5)
    print "Done."
|
# -*- coding: utf-8 -*-
"""A library for driving the Pimoroni Micro Dot pHAT Raspberry Pi add-on.
This library creates a virtual buffer of unlimited size onto which you
can write text and icons for scrolling around the Micro Dot pHAT display.
Methods are included for rotating and scrolling, plus writing text either
kerned to one pixel spacing, or spaced to place one character per matrix.
"""
import atexit
try:
import numpy
except ImportError:
raise ImportError("This library requires the numpy module\nInstall with: sudo pip install numpy")
from .font import font as _font, tinynumbers as _tinynumbers
from .matrix import NanoMatrix
__version__ = '0.2.2'

# Initial virtual buffer size: one full Micro Dot pHAT display (it grows
# on demand as pixels are written outside this area).
WIDTH = 45
HEIGHT = 7

_is_setup = False                      # guards one-time hardware init
_buf = numpy.zeros((HEIGHT, WIDTH))    # virtual framebuffer
_decimal = [0] * 6                     # decimal-point LED state per matrix
_scroll_x = 0                          # current horizontal scroll offset
_scroll_y = 0                          # current vertical scroll offset
_clear_on_exit = True                  # blank the display at exit by default
_rotate180 = False                     # rotate output 180 degrees
_mirror = False                        # flip output left-to-right
def _exit():
    """atexit handler: blank the display unless the user opted out."""
    if not _clear_on_exit:
        return
    clear()
    show()
def clear():
    """Reset the buffer to all-unlit and zero every scroll/decimal state."""
    global _buf, _decimal, _scroll_x, _scroll_y
    _scroll_x = 0
    _scroll_y = 0
    _decimal = [0] * 6
    # A fresh zero buffer also discards any on-demand growth.
    _buf = numpy.zeros((HEIGHT, WIDTH))
def fill(c):
    """Fill the buffer either lit or unlit
    :param c: Colour that should be filled onto the display: 1=lit or 0=unlit
    """
    global _buf
    # numpy in-place fill; any value is accepted, 0/1 expected.
    _buf.fill(c)
def is_connected():
    """Return True only if all three Micro Dot pHAT driver chips answer on I2C.

    Probes the chips at addresses 0x61, 0x62 and 0x63 in order, stopping at
    the first one that does not respond.
    """
    return all(NanoMatrix.is_connected(addr) for addr in (0x61, 0x62, 0x63))
def set_clear_on_exit(value):
    """Choose whether the display is wiped when the program exits.

    Pass False to keep a fixed message on screen after the script ends.

    :param value: truthy to clear on exit, falsy to leave the display as-is
    """
    global _clear_on_exit
    _clear_on_exit = bool(value)
def set_rotate180(value):
    """Enable or disable 180-degree rotation of the output.

    :param value: truthy to rotate the display 180 degrees, falsy for normal
    """
    global _rotate180
    _rotate180 = bool(value)
def set_mirror(value):
    """Enable or disable left-to-right mirroring of the output.

    :param value: truthy to mirror the display, falsy for normal orientation
    """
    global _mirror
    _mirror = bool(value)
def set_col(x, col):
    """Write one 7-pixel column into the buffer.

    Only useful when not scrolling vertically.

    :param x: column index to write
    :param col: 8-bit integer; bits 0..6 map to rows 0..6
    """
    for row in range(7):
        lit = bool(col & (1 << row))
        set_pixel(x, row, lit)
def set_pixel(x, y, c):
    """Set the state of a single pixel in the buffer
    If the pixel falls outside the current buffer size, it will be grown automatically
    :param x: The x position of the pixel to set
    :param y: The y position of the pixel to set
    :param c: The colour to set: 1=lit or 0=unlit
    """
    global _buf
    # Normalise any truthy value to 1.
    c = 1 if c else 0
    try:
        _buf[y][x] = c
    except IndexError:
        # Grow the buffer just enough (rows then columns) and retry.
        # NOTE(review): negative indices wrap silently via numpy indexing
        # rather than growing the buffer -- confirm callers never pass them.
        if y >= _buf.shape[0]:
            _buf = numpy.pad(_buf, ((0, y - _buf.shape[0] + 1), (0, 0)), mode='constant')
        if x >= _buf.shape[1]:
            _buf = numpy.pad(_buf, ((0, 0), (0, x - _buf.shape[1] + 1)), mode='constant')
        _buf[y][x] = c
def write_char(char, offset_x=0, offset_y=0):
    """Render a single character's 5x7 font glyph into the buffer.

    :param char: the ASCII char to write
    :param offset_x: x position of the glyph's left edge (default 0)
    :param offset_y: y position of the glyph's top row (default 0)
    """
    glyph = _get_char(char)
    for col in range(5):
        column_bits = glyph[col]
        for row in range(7):
            lit = bool(column_bits & (1 << row))
            set_pixel(offset_x + col, offset_y + row, lit)
def _get_char(char):
    """Look up the font columns for a single character.

    :param char: a one-character string
    :returns: the font entry (list of column bytes) for the character
    :raises ValueError: if the character is not present in the font
    """
    try:
        code = ord(char)
    except TypeError:
        # Non-character input: fall through to the unsupported-char error.
        code = None
    # Map the fullwidth tilde onto the wave-dash glyph.
    if code == 65374:
        code = 12316
    if code is None or code not in _font:
        raise ValueError("Unsupported char {}".format(char))
    return _font[code]
def set_decimal(index, state):
    """Switch one of the six decimal-point LEDs.

    Indices outside 0..5 are silently ignored.

    :param index: decimal index, 0 to 5
    :param state: truthy to light the decimal point, falsy to clear it
    """
    global _decimal
    if index in (0, 1, 2, 3, 4, 5):
        _decimal[index] = int(bool(state))
def write_string(string, offset_x=0, offset_y=0, kerning=True):
    """Write a string to the buffer
    :returns: The length, in pixels, of the written string.
    :param string: The text string to write
    :param offset_x: Position the text along x (default 0)
    :param offset_y: Position the text along y (default 0)
    :param kerning: Whether to kern the characters closely together or display one per matrix (default True)
    :Examples:
    Write a string to the buffer, aligning one character per display. This is
    ideal for displaying still messages up to 6 characters long::
        microdotphat.write_string("Bilge!", kerning=False)
    Write a string to buffer, with the characters as close together as possible.
    This is ideal for writing text which you intend to scroll::
        microdotphat.write_string("Hello World!")
    """
    str_buf = []
    # Non-kerned: 5-column space and 3-column gap give one char per 8px matrix.
    space = [0x00] * 5
    gap = [0x00] * 3
    if kerning:
        space = [0x00] * 2
        gap = [0x00]
    for char in string:
        if char == ' ':
            str_buf += space
        else:
            char_data = numpy.array(_get_char(char))
            if kerning:
                # Strip blank columns from both edges of the glyph.
                char_data = numpy.trim_zeros(char_data)
            str_buf += list(char_data)
            str_buf += gap # Gap between chars
    if not kerning:
        # Pad to just past the display width so scrolling wraps cleanly.
        while len(str_buf) < WIDTH + 3:
            str_buf += [0x00]
    # Blit the assembled column data into the pixel buffer.
    for x in range(len(str_buf)):
        for y in range(7):
            p = (str_buf[x] & (1 << y)) > 0
            set_pixel(offset_x + x, offset_y + y, p)
    length = len(str_buf)
    del str_buf
    return length
def scroll(amount_x=0, amount_y=0):
    """Shift the visible window across the buffer.

    Called with no arguments, scrolls one pixel horizontally.

    :param amount_x: pixels to scroll along the x axis (default 0)
    :param amount_y: pixels to scroll along the y axis (default 0)
    :Examples:
    Scroll vertically::
        microdotphat.scroll(amount_y=1)
    Scroll diagonally::
        microdotphat.scroll(amount_x=1,amount_y=1)
    """
    global _scroll_x, _scroll_y
    if not (amount_x or amount_y):
        amount_x = 1
    # Wrap offsets to the current buffer dimensions.
    _scroll_x = (_scroll_x + amount_x) % _buf.shape[1]
    _scroll_y = (_scroll_y + amount_y) % _buf.shape[0]
def scroll_to(position_x=0, position_y=0):
    """Jump the scroll offset to an absolute position.

    :param position_x: target x offset (default 0)
    :param position_y: target y offset (default 0)
    """
    global _scroll_x, _scroll_y
    # Wrap both targets to the current buffer dimensions.
    _scroll_y = position_y % _buf.shape[0]
    _scroll_x = position_x % _buf.shape[1]
def scroll_horizontal(amount=1):
    """Scroll along the x axis only (one pixel by default).

    :param amount: pixels to scroll along x (default 1)
    """
    scroll(amount_x=amount)
def scroll_vertical(amount=1):
    """Scroll along the y axis only (one pixel by default).

    :param amount: pixels to scroll along y (default 1)
    """
    scroll(amount_y=amount)
def set_brightness(brightness):
    """Set the brightness of all six matrices.

    :param brightness: brightness level, from 0.0 to 1.0
    :raises ValueError: if brightness is outside the range [0.0, 1.0]
    """
    setup()
    if brightness < 0 or brightness > 1:
        raise ValueError("Brightness should be between 0.0 and 1.0")
    for chip_index in range(6):
        _mat[chip_index][0].set_brightness(brightness)
def show():
    """Output the buffer to the display
    A copy of the buffer will be scrolled and rotated according
    to settings before being drawn to the display.
    """
    setup()
    # Work on a copy so the user's buffer is never modified.
    scrolled_buffer = numpy.copy(_buf)
    scrolled_buffer = numpy.roll(scrolled_buffer, -_scroll_x, axis=1)
    scrolled_buffer = numpy.roll(scrolled_buffer, -_scroll_y, axis=0)
    # Rotation/mirroring apply to the visible 7x45 window only.
    if _rotate180:
        scrolled_buffer = numpy.rot90(scrolled_buffer[:7, :45], 2)
    if _mirror:
        scrolled_buffer = numpy.fliplr(scrolled_buffer[:7, :45])
    for m_x in range(6):
        # Each matrix shows 5 of every 8 columns (3-column gap between chars).
        x = (m_x * 8)
        b = scrolled_buffer[0:7, x:x + 5]
        _mat[m_x][0].set_decimal(_mat[m_x][1], _decimal[m_x])
        # NOTE(review): `x` is reused here as the column index, shadowing the
        # slice offset above -- works because the offset is no longer needed.
        for x in range(5):
            for y in range(7):
                try:
                    _mat[m_x][0].set_pixel(_mat[m_x][1], x, y, b[y][x])
                except IndexError:
                    pass # Buffer doesn't span this matrix yet
        del b
    # Each driver chip handles two matrices, so update every other entry.
    for m_x in range(0, 6, 2):
        _mat[m_x][0].update()
def draw_tiny(display, text):
    """Draw tiny numbers to the buffer
    Useful for drawing things like IP addresses.
    Can sometimes fit up to 3 digits on a single matrix
    :param display: Index from 0 to 5 of display to target, determines buffer offset
    :param text: Number to display
    """
    # Local glyph row list -- deliberately shadows the module-level _buf.
    _buf = []
    try:
        for num in [int(x) for x in text]:
            _buf += _tinynumbers[num]
            _buf += [0] # Space
    except ValueError:
        raise ValueError("text should contain only numbers: '{text}'".format(text=text))
    # Draw bottom-up: row 0 of the glyph data lands on display row 6.
    for row in range(min(len(_buf), 7)):
        data = _buf[row]
        offset_x = display * 8
        offset_y = 6 - (row % 7)
        # Each glyph row is 5 bits wide, drawn right-to-left.
        for d in range(5):
            set_pixel(offset_x + (4 - d), offset_y, (data & (1 << d)) > 0)
def setup():
    # One-time initialisation of the three NanoMatrix driver chips; safe to
    # call repeatedly.
    # NOTE(review): returns True on repeat calls but None the first time --
    # callers in this file ignore the return value.
    global _is_setup, _n1, _n2, _n3, _mat
    if _is_setup:
        return True
    _n1 = NanoMatrix(address=0x63)
    _n2 = NanoMatrix(address=0x62)
    _n3 = NanoMatrix(address=0x61)
    # (chip, matrix-index) pairs ordered left-to-right across the display.
    _mat = [(_n1, 1), (_n1, 0), (_n2, 1), (_n2, 0), (_n3, 1), (_n3, 0)]
    # Blank the display at interpreter exit (unless disabled).
    atexit.register(_exit)
    _is_setup = True
|
import math
import time_functions
import numpy as np
import sys
# Function calculates projection of an arbitrary vector
# on an arbitrary plane given by its normal vector representation.
def calculate_projection_of_vector_on_plane(vector, surface_normal_of_plane):
    """Project `vector` onto the plane (through the origin) with the given normal.

    Subtracts the component of `vector` along the normal.

    :param vector: array-like vector to project
    :param surface_normal_of_plane: array-like normal vector of the plane
    :returns: numpy array -- the component of `vector` lying in the plane
    """
    normal = surface_normal_of_plane
    # Scalar projection coefficient: (v . n) / |n|^2
    scale = np.dot(vector, normal) / np.sum(np.square(normal))
    return vector - scale * normal
# Function rotates a point about an arbitrary axis.
# Accomplished by aligning the rotation axis with z axis and then rotating by required angle.
# Then the backward transformation is done.
def calculate_theta_phi(rotation_axis):
    """Return (theta, phi) angles used to align `rotation_axis` with the z axis.

    theta is the angle of the axis's xz-projection from +z (about y),
    phi is the elevation toward +y. Each octant is handled explicitly so the
    atan branch cut is resolved correctly.
    NOTE(review): assumes rotation_axis is a non-zero 3-vector; the
    z == 0 and x == 0 special cases guard the division by sqrt(x^2+z^2).
    """
    x = rotation_axis[0]
    y = rotation_axis[1]
    z = rotation_axis[2]
    if x >= 0 and y >= 0 and z >= 0:
        if z == 0 and x == 0:
            # Axis lies along +y: xz-projection is degenerate.
            theta = 0
            phi = math.pi/2
        elif z == 0 and x != 0 :
            theta = math.pi/2
            phi = math.atan(y/math.sqrt(x**2 + z**2))
        else:
            theta = math.atan(x/z)
            phi = math.atan(y/math.sqrt(x**2 + z**2))
        return theta,phi
    elif x >= 0 and y >= 0 and z < 0:
        # Second quadrant of the xz plane: shift atan by pi.
        theta = math.pi + math.atan(x/z)
        phi = math.atan(y/math.sqrt(x**2 + z**2))
        return theta,phi
    elif x < 0 and y >= 0 and z <= 0:
        if z == 0:
            theta = 3*math.pi/2
            phi = math.atan(y/math.sqrt(x**2 + z**2))
        else:
            theta = math.pi + math.atan(x/z)
            phi = math.atan(y/math.sqrt(x**2 + z**2))
        return theta,phi
    elif x < 0 and y >= 0 and z > 0:
        # Fourth quadrant: shift atan by 2*pi to keep theta positive.
        theta = 2*math.pi + math.atan(x/z)
        phi = math.atan(y/math.sqrt(x**2 + z**2))
        return theta,phi
    elif x >= 0 and y < 0 and z >= 0:
        if z == 0 and x == 0:
            # Axis lies along -y.
            theta = 0
            phi = -math.pi/2
        elif z == 0 and x != 0:
            theta = math.pi/2
            phi = math.atan(y/math.sqrt(x**2 + z**2))
        else:
            theta = math.atan(x/z)
            phi = math.atan(y/math.sqrt(x**2 + z**2))
        return theta,phi
    elif x >= 0 and y < 0 and z < 0:
        if x == 0:
            theta = 2*math.pi
            phi = math.atan(y/math.sqrt(x**2 + z**2))
        else:
            theta = math.pi + math.atan(x/z)
            phi = math.atan(y/math.sqrt(x**2 + z**2))
        return theta,phi
    elif x < 0 and y < 0 and z <= 0:
        if z == 0:
            theta = 3*math.pi/2
            phi = math.atan(y/math.sqrt(x**2 + z**2))
        else:
            theta = math.pi + math.atan(x/z)
            phi = math.atan(y/math.sqrt(x**2 + z**2))
        return theta,phi
    elif x < 0 and y < 0 and z > 0:
        theta = 2*math.pi + math.atan(x/z)
        phi = math.atan(y/math.sqrt(x**2 + z**2))
        return theta,phi
def rotate_point_about_arbitrary_axis_in_3d(position_vector_of_tail_of_rotation_axis,
                                            position_vector_of_tip_of_rotation_axis,
                                            coordinates_to_rotate,
                                            rotation_angle_in_degrees):
    """Rotate a 3-D point about the axis through two given points.

    The axis runs from ``position_vector_of_tail_of_rotation_axis`` to
    ``position_vector_of_tip_of_rotation_axis``.  Classic construction:
    translate the axis tail to the origin, rotate the axis onto +z (using
    theta/phi from calculate_theta_phi), rotate by the requested angle
    about z, then undo the alignment rotations and the translation.
    Returns the rotated point as a length-3 numpy array.
    """
    rad_rotation_angle = math.radians(rotation_angle_in_degrees)
    # Direction of the rotation axis (rounded to damp floating-point noise).
    rotation_axis = np.round(np.array(position_vector_of_tip_of_rotation_axis) - np.array(position_vector_of_tail_of_rotation_axis),5)
    # Convert the point to homogenous coordinates so translations can be
    # expressed as 4x4 matrices alongside the rotations.
    coordinates_to_rotate = list(coordinates_to_rotate)
    coordinates_to_rotate.append(1)
    coordinates_of_rotation_axis = position_vector_of_tail_of_rotation_axis
    # Step 1: translate so the axis tail sits at the origin.
    translation_to_origin_matrix = [[1, 0, 0, -coordinates_of_rotation_axis[0]],
                                    [0, 1, 0, -coordinates_of_rotation_axis[1]],
                                    [0, 0, 1, -coordinates_of_rotation_axis[2]],
                                    [0, 0, 0, 1]]
    # Find angle between z axis and the projection of the shifted rotation axis on xz plane
    # Find angle between transformed rotation axis and z axis
    theta,phi = calculate_theta_phi(rotation_axis)
    # Step 2: rotate by -theta about y, bringing the axis into the y-z plane.
    rotate_by_negative_theta_about_y_axis_matrix = [[math.cos(theta), 0, -math.sin(theta), 0],
                                                    [0, 1, 0, 0],
                                                    [math.sin(theta), 0, math.cos(theta), 0],
                                                    [0, 0, 0, 1]]
    # Step 3: rotate by phi about x, aligning the axis with +z.
    rotate_by_phi_about_x_axis_matrix = [[1, 0, 0, 0],
                                         [0, math.cos(phi), -math.sin(phi), 0],
                                         [0, math.sin(phi), math.cos(phi), 0],
                                         [0, 0, 0, 1]]
    # Step 4: perform the requested rotation about the (now aligned) z axis.
    rotate_by_negative_azimuth_about_z_axis_matrix = [
        [math.cos(rad_rotation_angle), -math.sin(rad_rotation_angle), 0, 0],
        [math.sin(rad_rotation_angle), math.cos(rad_rotation_angle), 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1]]
    # Step 5: undo step 3 (rotate by -phi about x).
    rotate_by_negative_phi_about_x_matrix = [[1, 0, 0, 0],
                                             [0, math.cos(phi), math.sin(phi), 0],
                                             [0, -math.sin(phi), math.cos(phi), 0],
                                             [0, 0, 0, 1]]
    # Step 6: undo step 2 (rotate by theta about y).
    rotate_by_theta_about_y_matrix = [[math.cos(theta), 0, math.sin(theta), 0],
                                      [0, 1, 0, 0],
                                      [-math.sin(theta), 0, math.cos(theta), 0],
                                      [0, 0, 0, 1]]
    # Step 7: translate the axis tail back to its original position.
    translation_to_observer_location_matrix = [[1, 0, 0, coordinates_of_rotation_axis[0]],
                                               [0, 1, 0, coordinates_of_rotation_axis[1]],
                                               [0, 0, 1, coordinates_of_rotation_axis[2]],
                                               [0, 0, 0, 1]]
    # Compose the transforms; they apply to the point right-to-left:
    # T_back * Ry(theta) * Rx(-phi) * Rz(angle) * Rx(phi) * Ry(-theta) * T_origin
    rot_coord = np.matmul(translation_to_observer_location_matrix,rotate_by_theta_about_y_matrix)
    rot_coord = np.matmul(rot_coord,rotate_by_negative_phi_about_x_matrix)
    rot_coord = np.matmul(rot_coord,rotate_by_negative_azimuth_about_z_axis_matrix)
    rot_coord = np.matmul(rot_coord,rotate_by_phi_about_x_axis_matrix)
    rot_coord = np.matmul(rot_coord,rotate_by_negative_theta_about_y_axis_matrix)
    rot_coord = np.matmul(rot_coord,translation_to_origin_matrix)
    rot_coord = np.matmul(rot_coord,coordinates_to_rotate)
    # Drop the homogenous component before returning.
    return rot_coord[0:3]
# Converts equatorial coordinates of RA/DEC to horizon coordinates of azimuth and
# altitude. Returns azimuth and altitude in degrees.
def eq_to_hor(ra,
              dec,
              time_of_observation_in_datetime_format,
              latitude_of_observer,
              longitude_of_observer,
              local_standard_time_meridian):
    """Convert equatorial (RA/DEC) to horizon (azimuth/altitude) coordinates.

    ``ra`` is in decimal hours and ``dec`` in degrees; observer latitude /
    longitude are in degrees (longitude presumably in [-180, 180], matching
    new_longitude's wrapping -- TODO confirm).  Sidereal time comes from the
    external ``time_functions`` module.  Returns (azimuth, altitude) in
    degrees.

    Method: locate the substellar point (where the target is at the zenith)
    from the hour angle, express observer and substellar point as unit
    vectors on the sphere, and measure azimuth on the observer's tangent
    plane between local north and the projected direction to the target.
    """
    # Calculate sidereal time at observer's location
    sidereal_time_at_observer_longitude = \
        time_functions.local_sidereal_time(time_of_observation_in_datetime_format,
                                           local_standard_time_meridian,
                                           longitude_of_observer)
    # Calculate hour angle (in hours)
    sidereal_time_at_observer_longitude = time_functions.convert_time_to_decimal(sidereal_time_at_observer_longitude)
    HA = sidereal_time_at_observer_longitude - ra
    # Calculate longitude of substellar point: the target drifts 15 deg of
    # longitude per hour of hour angle (360/24).
    longitude_of_substellar_point = new_longitude(longitude_of_observer,-HA*360/24)
    latitude_of_substellar_point = dec
    # Convert angles in degrees to radians.
    # Find the sine and cosine of all angles involved to convert spherical coordinates
    # of location of observer and location of substellar point at observation time to cartesian coordinates.
    rad_lat_obs = math.radians(latitude_of_observer)
    rad_lon_obs = math.radians(longitude_of_observer)
    rad_lat_ss = math.radians(latitude_of_substellar_point)
    rad_lon_ss = math.radians(longitude_of_substellar_point)
    cos_lat_obs = math.cos(rad_lat_obs)
    sin_lat_obs = math.sin(rad_lat_obs)
    cos_lon_obs = math.cos(rad_lon_obs)
    sin_lon_obs = math.sin(rad_lon_obs)
    cos_lat_ss = math.cos(rad_lat_ss)
    sin_lat_ss = math.sin(rad_lat_ss)
    cos_lon_ss = math.cos(rad_lon_ss)
    sin_lon_ss = math.sin(rad_lon_ss)
    # Unit position vectors of observer and substellar points in cartesian coordinates
    position_vector_of_observer = np.array([cos_lat_obs * cos_lon_obs,
                                            cos_lat_obs * sin_lon_obs,
                                            sin_lat_obs])
    position_vector_of_substellar_point = np.array([cos_lat_ss * cos_lon_ss,
                                                    cos_lat_ss * sin_lon_ss,
                                                    sin_lat_ss])
    # Project the vector from observer location and North Pole on the tangent plane at observer.
    # This gives the direction vector of local North.
    # Also project the vector from observer to substellar point to the same plane
    # to provide the direction of azimuth.
    # Angle between these 2 vectors gives the azimuth if HA is <0.
    # If HA is greater than 0, subtract from 360 to get azimuth.
    # Project observer to substellar point vector to surface tangent plane at observer.
    # The 1e-12 offset guards against a zero-length difference vector when
    # the target is exactly at the observer's zenith.
    observer_to_substellar_point_vector = position_vector_of_substellar_point - position_vector_of_observer + 1e-12
    surface_normal_to_local_tangent_plane_at_observer = position_vector_of_observer
    projection_of_observer_to_substellar_point_vector_on_surface_tangent_plane_at_observer = calculate_projection_of_vector_on_plane(
        observer_to_substellar_point_vector,
        surface_normal_to_local_tangent_plane_at_observer)
    # Normalise the projected direction to a unit vector.
    projection_of_observer_to_substellar_point_vector_on_surface_tangent_plane_at_observer = \
        projection_of_observer_to_substellar_point_vector_on_surface_tangent_plane_at_observer / np.sqrt(
            np.sum(np.square(projection_of_observer_to_substellar_point_vector_on_surface_tangent_plane_at_observer)))
    # Project the observer to north pole vector to get local north vector
    position_vector_of_north_pole = np.array([0, 0, 1])
    observer_to_north_pole_vector = position_vector_of_north_pole - position_vector_of_observer
    local_tangent_pointing_north_vector = \
        calculate_projection_of_vector_on_plane(observer_to_north_pole_vector,
                                                surface_normal_to_local_tangent_plane_at_observer)
    local_tangent_pointing_north_vector = \
        local_tangent_pointing_north_vector / \
        np.sqrt(np.sum(np.square(local_tangent_pointing_north_vector)))
    # Unsigned angle between local north and the target direction.
    azimuth = math.acos(np.dot(local_tangent_pointing_north_vector,
                               projection_of_observer_to_substellar_point_vector_on_surface_tangent_plane_at_observer))
    azimuth = math.degrees(azimuth)
    # acos gives only [0, 180]; use the hour angle (with 24 h wrap-around)
    # to decide whether the target is east of the meridian (keep the angle)
    # or west (mirror it to 360 - azimuth).
    if sidereal_time_at_observer_longitude > ra:
        d1 = np.abs(sidereal_time_at_observer_longitude - ra)
        d2 = np.abs(sidereal_time_at_observer_longitude - (ra + 24))
        if d1 < d2:
            azimuth = 360 - azimuth
        else:
            azimuth = azimuth  # wrap case: target effectively east, keep as-is
    elif ra > sidereal_time_at_observer_longitude:
        d1 = np.abs(ra - sidereal_time_at_observer_longitude)
        d2 = np.abs(ra - (sidereal_time_at_observer_longitude + 24))
        if d1 < d2:
            azimuth = azimuth  # target east of meridian, keep as-is
        else:
            azimuth = 360 - azimuth
    elif ra == sidereal_time_at_observer_longitude:
        # Target on the meridian: due south by convention here.
        azimuth = 180
    # # Calculate Altitude
    # Altitude is 90 deg minus the great-circle separation between observer
    # and substellar point (spherical law of cosines).
    altitude_ = math.acos(cos_lat_obs * cos_lat_ss * math.cos(rad_lon_obs - rad_lon_ss) +
                          sin_lat_obs * sin_lat_ss)
    altitude_ = math.degrees(altitude_)
    altitude = 90 - altitude_
    return azimuth, altitude
# Function converts horizon coordinates of altitude and azimuth to
# equatorial coordinates of RA and DEC. RA is returned as hour between 0 and 24
# and DEC is returned in degrees.
def hor_to_eq(azimuth,
              altitude,
              time_of_observation_in_datetime_format,
              latitude_of_observer,
              longitude_of_observer,
              local_standard_time_meridian):
    """Convert horizon (azimuth/altitude) to equatorial (RA/DEC) coordinates.

    Azimuth and altitude are in degrees; observer latitude/longitude in
    degrees.  Returns (RA, DEC) with RA in decimal hours in [0, 24) and DEC
    in degrees, both rounded to 5 places.

    Method: start from the local-north tangent vector at the observer,
    rotate it about the observer's zenith axis by -azimuth, then tilt it up
    by the altitude; the resulting direction points at the substellar
    point, whose latitude/longitude give DEC and (via sidereal time) RA.
    """
    # Calculate sidereal time at observer's location
    sidereal_time_at_observer_longitude = \
        time_functions.local_sidereal_time(time_of_observation_in_datetime_format,
                                           local_standard_time_meridian,
                                           longitude_of_observer)
    # Convert all known angles to radians and compute sine and cosine of the angles
    rad_lat_obs = math.radians(latitude_of_observer)
    rad_lon_obs = math.radians(longitude_of_observer)
    cos_lat_obs = math.cos(rad_lat_obs)
    sin_lat_obs = math.sin(rad_lat_obs)
    cos_lon_obs = math.cos(rad_lon_obs)
    sin_lon_obs = math.sin(rad_lon_obs)
    # Compute vectors in cartesian coordinates
    position_vector_of_observer = np.array([cos_lat_obs * cos_lon_obs, cos_lat_obs * sin_lon_obs, sin_lat_obs])
    surface_normal_to_local_tangent_plane_at_observer = position_vector_of_observer
    position_vector_of_north_pole = np.array([0, 0, 1])
    observer_to_north_pole_vector = position_vector_of_north_pole - position_vector_of_observer
    local_tangent_pointing_north_vector = calculate_projection_of_vector_on_plane(observer_to_north_pole_vector,
                                                                                 surface_normal_to_local_tangent_plane_at_observer)
    # Work with point coordinates (observer position + direction) because the
    # rotation helper operates on points, not free vectors.
    coordinates_of_local_tangent_pointing_north = position_vector_of_observer + \
                                                  np.array(local_tangent_pointing_north_vector)
    # The zenith axis: from the observer radially outward.
    position_vector_of_tail_of_rotation_axis = position_vector_of_observer
    position_vector_of_tip_of_rotation_axis = 2*position_vector_of_observer
    # Find expression for azimuth direction coordinates by rotating about the normal of tangent plane at observer
    azimuth_direction_coordinates = rotate_point_about_arbitrary_axis_in_3d(
        position_vector_of_tail_of_rotation_axis=position_vector_of_tail_of_rotation_axis,
        position_vector_of_tip_of_rotation_axis=position_vector_of_tip_of_rotation_axis,
        coordinates_to_rotate=coordinates_of_local_tangent_pointing_north,
        rotation_angle_in_degrees=-azimuth)
    # Rotate coordinates of azimuth direction about surface normal at observer by an additional 90 degrees
    coordinates_of_90_degree_away_from_azimuth = rotate_point_about_arbitrary_axis_in_3d(
        position_vector_of_tail_of_rotation_axis=position_vector_of_tail_of_rotation_axis,
        position_vector_of_tip_of_rotation_axis=position_vector_of_tip_of_rotation_axis,
        coordinates_to_rotate=azimuth_direction_coordinates,
        rotation_angle_in_degrees=-90)
    # Rotate azimuth_direction_coordinates by elevation angle about the ninety_degree_away_vector axis from_azimuth_vector
    substellar_point_coordinates = rotate_point_about_arbitrary_axis_in_3d(
        position_vector_of_tail_of_rotation_axis=position_vector_of_tail_of_rotation_axis,
        position_vector_of_tip_of_rotation_axis=coordinates_of_90_degree_away_from_azimuth,
        coordinates_to_rotate=azimuth_direction_coordinates,
        rotation_angle_in_degrees=altitude)
    substellar_point_vector = substellar_point_coordinates - position_vector_of_tail_of_rotation_axis
    unit_substellar_point_vector = substellar_point_vector / np.linalg.norm(substellar_point_vector)
    # Declination equals the latitude of the substellar point (z component).
    DEC = math.asin(unit_substellar_point_vector[2])
    # Find longitude of substellar point
    # NOTE(review): divides by cos(DEC) -- raises ZeroDivisionError if the
    # target is at a celestial pole (DEC = +/-90 deg); confirm callers
    # never pass that case.
    cos_lon_ss = unit_substellar_point_vector[0] / math.cos(DEC)
    lon_ss = math.acos(cos_lon_ss)
    lon_ss = math.degrees(lon_ss)
    one_eighty_opposite_lon_obs = new_longitude(longitude_of_observer,180)
    # acos only yields [0, 180] deg; mirror lon_ss east/west using the
    # azimuth to recover its sign.
    # NOTE(review): the sub-clauses comparing lon_ss <= 0 / lon_ss <= -180
    # can never be true for lon_ss in [0, 180] produced by acos above --
    # confirm the intended hemisphere logic.
    # If azimuth is between 0 and 180 but lon_ss lies west of observer's longitude
    if (azimuth > 0 and azimuth < 180) and ((lon_ss < longitude_of_observer and lon_ss >= 0) or
                                            (lon_ss > one_eighty_opposite_lon_obs and lon_ss <= 0)):
        lon_ss = new_longitude(-lon_ss,180)
    # If azimuth is between 180 and 360 but lon_ss lies east of observer's longitude
    elif (azimuth > 180 and azimuth < 360) and ((lon_ss > longitude_of_observer and lon_ss <=180) or
                                                (lon_ss <=one_eighty_opposite_lon_obs and lon_ss <=-180)) :
        lon_ss = new_longitude(-lon_ss, 180)
    sidereal_time_at_observer_longitude = time_functions.convert_time_to_decimal(sidereal_time_at_observer_longitude)
    # RA follows from sidereal time minus the hour angle implied by the
    # longitude offset (15 deg of longitude per hour).
    RA = sidereal_time_at_observer_longitude - (longitude_of_observer - lon_ss) / 15
    if RA < 0:
        RA = RA + 24
    elif RA > 24:
        RA = RA - 24
    DEC = math.degrees(DEC)
    return round(RA,5), round(DEC,5)
# Calculates new longitude from current longitude based on delta longitude
def new_longitude(longitude_in_degrees,
                  delta_longitude_in_degrees):
    """Return the longitude reached by moving ``delta_longitude_in_degrees``
    (east if positive, west if negative) from ``longitude_in_degrees``,
    wrapping across the +/-180 degree antimeridian.
    """
    half_turns = int(abs(delta_longitude_in_degrees) / 180)
    remainder = abs(delta_longitude_in_degrees) % 180
    # An odd number of half turns first lands on the opposite meridian.
    if half_turns % 2 != 0:
        if longitude_in_degrees >= 0:
            longitude_in_degrees = longitude_in_degrees - 180
        else:
            longitude_in_degrees = longitude_in_degrees + 180
    # Apply the sub-half-turn remainder in the direction of travel,
    # wrapping whenever the result leaves (-180, 180].
    if longitude_in_degrees >= 0:
        if delta_longitude_in_degrees >= 0:
            lon_ss = longitude_in_degrees + remainder
            if lon_ss > 180:
                lon_ss = lon_ss - 360
        else:
            lon_ss = longitude_in_degrees - remainder
    else:
        if delta_longitude_in_degrees >= 0:
            lon_ss = longitude_in_degrees + remainder
        else:
            lon_ss = longitude_in_degrees - remainder
            if lon_ss < -180:
                lon_ss = lon_ss + 360
    return lon_ss
|
import numpy as np
import matplotlib.pyplot as plt
import useful as use
import pdb
from decimal import Decimal
np.set_printoptions(threshold=np.inf)  # print arrays in full when debugging
# One figure per comparison plot: ax3 overlays the theory curve with all
# three integrators, ax4 compares theory vs the 4th-order scheme only,
# ax5 is created but not used in this section.
fig1, ax3 = plt.subplots()
fig2, ax4 = plt.subplots()
fig3, ax5 = plt.subplots()
##############################################
# Theoretical Solution
#
# Closed-form displacement of the damped harmonic oscillator,
#   r(t) = e^(-gamma*t) * (r_0*cos(w*t) + (v_0 + gamma*r_0)/w * sin(w*t)),
# sampled every dt over two full periods (t in [0, 4*pi]).
dt = .001
l = [1]
r_0 = 1
v_0 = 0
t = 0
r = r_0
v = v_0
s = 2**(1/3)
w_0 = 1
gamma = .6
w = np.sqrt(w_0**2 - gamma**2)  # damped natural frequency
timing = [0]
for i in range(0, int(2*2*np.pi/dt)):
    # BUG FIX: the original advanced t *before* logging and appended
    # timing.append(t + dt), pairing each displacement with a time 2*dt
    # too late (and duplicating the t = 0 sample).  Advance time first,
    # then record the matching (t, r) pair.
    t += dt
    r = np.e**(-gamma*t)*(r_0*np.cos(w*t)+(v_0+gamma*r_0)/w*np.sin(w*t))
    l.append(r)
    timing.append(t)
ax3.plot(timing, l)
ax4.plot(timing, l)
################################################################
################################################################
################################################################
# First Order
#
# First-order drift/kick integration of the damped oscillator, with an
# exponential damping factor applied to the velocity each step.  dt is
# deliberately large (0.9) to expose the scheme's error, and is reused
# by the second- and fourth-order integrators below.
# NOTE(review): the damping factor uses 2*gamma per step -- confirm this
# matches the e^(-gamma*t) envelope of the theory block above.
dt = .9
l = [1]
r_0 = 1
v_0 = 0
t = 0
w_0 = 1
gamma = .6
w = np.sqrt(w_0**2 - gamma**2)
timing = [0]
r = r_0
v = v_0
s = 2**(1/3)
for i in range(0, int((2*2*np.pi)/dt)):
    r = r + v*dt                 # drift: advance position
    v = v - w_0**2*r*dt          # kick: spring acceleration
    v = np.e**(-2*gamma*dt)*v    # velocity damping
    l.append(r)
    timing.append(timing[-1] + dt)
# (dead "r = np.asarray(r)" removed: its result was never used)
ax3.plot(timing, l)
################################################################
################################################################
# Second Order
#
# Second-order (kick-drift-kick / leapfrog-style) integration of the
# damped oscillator, reusing the large dt set by the first-order run.
# Each half-kick from the spring force is paired with a half-step of
# exponential velocity damping, symmetric about the position update.
l, timing = [1], [0]
r_0, v_0 = 1, 0
t = 0
w_0 = 1
gamma = .6
w = np.sqrt(w_0**2 - gamma**2)
r, v = r_0, v_0
s = 2**(1/3)
for i in range(int((4*np.pi)/dt)):
    v = v - w_0**2*r*.5*dt           # half kick (spring force)
    v = np.e**(-2*gamma*.5*dt)*v     # half damping
    r = r + v*dt                     # full drift
    v = np.e**(-2*gamma*.5*dt)*v     # half damping
    v = v - w_0**2*r*.5*dt           # half kick (spring force)
    l.append(r)
    timing.append(timing[-1] + dt)
ax3.plot(timing, l)
################################################################
################################################################
#Fourth Order
# Fourth-order composition of the second-order step: a forward sub-step
# of length H, a backward sub-step of length -s*H, and another forward
# sub-step of length H, with s = 2^(1/3) and H = dt/(2-s).  This is the
# Yoshida-style triple-jump splitting -- TODO confirm against a
# reference; the sub-step order below is essential and must not change.
l = [1]
r_0 = 1
v_0 = 0
t = 0
w_0 = 1
gamma=.6
w = np.sqrt(w_0**2-gamma**2)
r = r_0
v = v_0
s = 2**(1/3)
H = dt/(2-s)
timing = [0]
for i in range(0,int((4*np.pi)/dt)):
    # Forward second-order step of length H (kick-damp-drift-damp-kick).
    v = v - w_0**2*r*.5*H
    v = np.e**(-2*gamma*.5*H)*v
    r = r+v*H
    v = np.e**(-2*gamma*.5*H)*v
    v = v - w_0**2*r*.5*H
    # Backward step of length -s*H.
    v = v - w_0**2*r*.5*-s*H
    v = np.e**(-2*gamma*.5*-s*H)*v
    r = r+v*-s*H
    v = np.e**(-2*gamma*.5*-s*H)*v
    v = v - w_0**2*r*.5*-s*H
    # Forward step of length H.
    v = v - w_0**2*r*.5*H
    v = np.e**(-2*gamma*.5*H)*v
    r = r+v*H
    v = np.e**(-2*gamma*.5*H)*v
    v = v - w_0**2*r*.5*H
    l.append(r)
    timing.append(timing[len(timing)-1]+dt)
ax4.plot(timing,l)
ax4.set_ylabel('Particle Displacement')
ax4.set_xlabel('Time')
ax4.legend(('Theory','4th Order'), loc='upper right')
ax4.set_title("Damped HO - Comparison with .9 Timestep", va='bottom')
###################################################
###################################################
# Final styling for the all-integrators comparison axes, then render
# every figure.
ax3.set_xlabel('Time')
ax3.set_ylabel('Particle Displacement')
ax3.legend(('Theory', '1st Order', '2nd Order', '4th Order'), loc='upper right')
ax3.set_title("Damped HO - Comparison with .9 Timestep", va='bottom')
plt.show()
# Xiaomi Chuangmi Camera
|
from selenium.webdriver.common.by import By
from .BasePage import BasePage
# Page object for the yandex.ru main page.
# The class is constructed with a webdriver instance.
# click_on_yandex_service takes a service name as its argument (e.g. "Картинки") and clicks the matching service link on the page.
# *a value may also be passed for the return_href argument (e.g. True), in which case the method returns the service URL instead.
# enter_text_into_search returns the search input element.
# find_button_click returns the "Найти" (Search) button element.
class StartPage(BasePage):
    """Page object for the yandex.ru start page."""

    def click_on_yandex_service(self, requested_service: str):
        """Click the service whose title contains ``requested_service``.

        Returns the clicked element, or None when no service matched.
        """
        services_list = self.waitAll(By.CLASS_NAME, "services-new__item-title")
        for el in services_list:
            if requested_service in el.text:
                el.click()
                # BUG FIX: stop after the first match.  The click may
                # navigate away, so continuing to read .text on the
                # remaining elements risks a StaleElementReferenceException
                # (and could click more than one service).
                return el
        return None
|
# This file is generated, do not modify
import datetime
# Release metadata stamped in by the build tooling.  REQ_MD5 / PY_MD5 are
# presumably checksums of the requirements file and this module's source
# (so a manual edit would invalidate them) -- TODO confirm with the
# generator.
VERSION = (1, 1, 39)        # version as a comparable tuple
VERSION_STR = '1.1.39'      # the same version, human-readable
PRODUCT = 'gatling/version.py'
REQ_MD5 = '66d2b20c6fa5c95ac17a05d908889c66'
PY_MD5 = '718bdc24d15cec0096bd0ab6ed8e405b'
TIMESTAMP = datetime.datetime(2020, 2, 21, 2, 17, 35, 784725)  # build time
USER_NAME = 'Philip Bergen'
USER_EMAIL = 'pbergen@salesforce.com'
|
#! /usr/bin/python
#
# postgre_python_update.py
#
# CGI script: reads (id, population) pairs from the request parameters
# and updates the corresponding rows in the "city" database, echoing
# each update back as HTML.
#
# Sep/06/2016
#
# -------------------------------------------------------------------
import cgi
import string
import sys
import psycopg2
#
import json
#
#
# -------------------------------------------------------------------
sys.path.append ('/var/www/data_base/common/python_common')
#
from sql_manipulate import sql_update_proc
from cgi_manipulate import parse_parameter
#
#
# --------------------------------------------------------
# NOTE(review): credentials are hard-coded; consider moving them to
# configuration outside the web root.
conn = psycopg2.connect("dbname=city user=scott password=tiger")
cur = conn.cursor()
#
# --------------------------------------------------------
#
# CGI header must be emitted before any other output.
print ("Content-type: text/html\n\n")
#
# ---------------------------------------------------------------
array_bb = parse_parameter ()
#
# Iterate the parsed rows directly instead of indexing with
# range(len(...)).
for row in array_bb:
    id_in = row['id']
    population_in = int (row['population'])
    print ("id_in = %s<br />" % id_in)
    print ("population_in = %d<br />" % population_in)
    sql_update_proc (cur,id_in,population_in)
#
conn.commit ()
#
cur.close ()
conn.close ()
#
#
print ("OK<br />")
#
|
from __future__ import unicode_literals
from django.db import models, connection
from .private_media import *
from django.contrib.auth.models import User
from django.core.validators import MaxValueValidator, MinValueValidator
from tinymce import HTMLField
import uuid
# ENUM choices
# Value/label pairs used as `choices=` on the model fields below.
# The stored value comes first, the human-readable label second.
DISCOUNT_CHOICE = (
    ('Percentage', 'Percentage'),
    ('Amount', 'Amount'),
)
# Soft-delete flag, stored as a string rather than a BooleanField.
IS_DELETED_CHOICE = (
    ('TRUE', 'TRUE'),
    ('FALSE', 'FALSE'),
)
PAGE_LAYOUT = (
    ('Landscape','Landscape'),
    ('Portrait','Portrait'),
)
# Graded permission levels (contrast with the on/off PERMISSION_BOOLEAN).
PERMISSION_LEVEL = (
    (0, 'No Permission'),
    (1, 'Read Only'),
    (2, 'Edit Only'),
    (3, 'Add and Edit'),
    (4, 'Full Permission'),
)
PERMISSION_BOOLEAN = (
    (0, 'No Permission'),
    (1, 'Has Permission'),
)
PRODUCT_OR_SERVICE = (
    ('Product', 'Product'),
    ('Service', 'Service'),
)
PROJECT_STATUS_CHOICE = (
    ('New', 'New'),
    ('Open', 'Open'),
    ('Resolved', 'Resolved'),
    ('Closed', 'Closed'),
)
QUOTE_APPROVAL_STATUS = (
    ('REJECTED', 'REJECTED'),
    ('DRAFT', 'DRAFT'),
    ('APPROVED', 'APPROVED'),
)
# Self-assessment scales; note the values are stored as strings '0'..'4'.
WANT_CHOICE=(
    ('0','Do not want to do'),
    ('1','Want to do'),
)
SKILL_CHOICE=(
    ('0','Can not do'),
    ('1','Willing to learn'),
    ('2','Knows a little'),
    ('3','Knows a lot'),
    ('4','Proficient'),
)
WEBSITE_SOURCE=(
    ('Twitter','Twitter'),
    ('Facebook','Facebook'),
    ('Github','Github'),
    ('Gitlab','Gitlab'),
    ('Website','Website'),
    ('LinkedIn','LinkedIn'),
    ('Staff Page','Staff page'),
    ('Other','Other'),
)
# List of tables - in alphabetical order
class about_user(models.Model):
    """Rich-text 'about me' blurb attached to a user."""
    about_user_id=models.AutoField(primary_key=True)
    about_user_text=HTMLField()
    user=models.ForeignKey(
        User,
        on_delete=models.CASCADE,
    )
    # Standard audit-trail columns used across all tables in this module.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE',
    )
    class Meta:
        db_table = "about_user"
class assigned_user(models.Model):
    """Assignment of a user to a project, task, or opportunity.

    Presumably exactly one of the three nullable FKs is set per row --
    not enforced at the database level; TODO confirm with application code.
    """
    assigned_user_id = models.AutoField(primary_key=True)
    user_id = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
    )
    project_id = models.ForeignKey(
        'project',
        on_delete=models.CASCADE,
        blank=True,
        null=True
    )
    task_id = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    opportunity_id = models.ForeignKey(
        'opportunity',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE',
    )
    class Meta:
        db_table = "assigned_user"
"""
Contact History is a simple form that user will fill out every time they
have some form of contact with the customer. This table will store both
contact history for customer and Organisations. The customer field in
this instance is not required, and implies that the contact history is
applied to the organisation. The organisation field will fill out automatically
when a user applies it to a customer. :)
"""
class contact_history(models.Model):
    """A record of one contact with a customer or organisation.

    customer_id is optional: a null customer means the entry applies to
    the organisation as a whole (see the module-level note above this
    class).
    """
    contact_history_id = models.AutoField(primary_key=True)
    organisation_id = models.ForeignKey(
        'organisation',
        on_delete=models.CASCADE,
    )
    customer_id = models.ForeignKey(
        'customer',
        on_delete=models.CASCADE,
        blank=True,
        null=True
    )
    contact_type = models.ForeignKey(
        'list_of_contact_type',
        on_delete=models.CASCADE,
    )
    contact_date = models.DateTimeField()
    contact_history = HTMLField('contact_history')
    # Optional attachment for the contact (e.g. an email or scanned letter).
    document_key = models.ForeignKey(
        'document',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    user_id = models.ForeignKey(
        User,
        on_delete=models.CASCADE
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    class Meta:
        db_table = "contact_history"
class bug(models.Model):
    """A bug mirrored from an external bug tracker (see bug_client),
    optionally linked to a project, task, or requirement."""
    bug_id = models.AutoField(primary_key=True)
    bug_client = models.ForeignKey(
        'bug_client',
        on_delete=models.CASCADE,
    )
    bug_code = models.CharField(max_length=255) # Just stores the code of the bug
    bug_description = models.TextField()
    bug_status = models.CharField(max_length=50) # Updated manually?
    project = models.ForeignKey(
        'project',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    task = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    requirement = models.ForeignKey(
        'requirement',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey \
        (User,
         on_delete=models.CASCADE,
         related_name='%(class)s_change_user',
         )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    def __str__(self):
        return str(self.bug_description)
    class Meta:
        db_table = "bug"
class bug_client(models.Model):
    """A connection to an external bug tracker instance (name + URL,
    typed by list_of_bug_client)."""
    bug_client_id = models.AutoField(primary_key=True)
    bug_client_name = models.CharField(max_length=50)
    list_of_bug_client = models.ForeignKey(
        'list_of_bug_client',
        on_delete=models.CASCADE,
    )
    bug_client_url = models.URLField()
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey \
        (User,
         on_delete=models.CASCADE,
         related_name='%(class)s_change_user',
         )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    def __str__(self):
        return str(self.bug_client_name)
    class Meta:
        db_table = "bug_client"
class campus(models.Model):
    """A physical location (address, phone, optional geo-coordinates)
    belonging to an organisation or directly to a customer."""
    campus_id = models.AutoField(primary_key=True)
    organisation_id = models.ForeignKey(
        'organisation',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    customer = models.ForeignKey(
        'customer',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    campus_nickname = models.CharField(max_length=100)
    campus_phone = models.CharField(
        max_length=20,
        null=True
    )
    campus_fax = models.CharField(
        max_length=20,
        null=True
    )
    campus_address1 = models.CharField(
        max_length=255,
        null=True
    )
    campus_address2 = models.CharField(
        max_length=255,
        null=True
    )
    campus_address3 = models.CharField(
        max_length=255,
        null=True
    )
    campus_suburb = models.CharField(max_length=50)
    campus_region_id = models.ForeignKey(
        'list_of_country_region',
        on_delete=models.CASCADE,
    )
    campus_postcode = models.CharField(
        max_length=10,
        null=True,
        blank=True,
    )
    campus_country_id = models.ForeignKey(
        'list_of_country',
        on_delete=models.CASCADE,
    )
    campus_longitude = models.DecimalField(
        decimal_places=13,
        max_digits=16,
        null=True, # If user has no mapping software, we want to leave this blank
        blank=True,
    )
    campus_latitude = models.DecimalField(
        decimal_places=13,
        max_digits=16,
        null=True, # If user has no mapping software, we want to leave this blank
        blank=True,
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    def __str__(self):
        return str(self.campus_nickname)
    class Meta:
        db_table = "campus"
class cost(models.Model):
    """A monetary cost recorded against a project or a task."""
    cost_id = models.AutoField(primary_key=True)
    project_id = models.ForeignKey(
        'project',
        on_delete=models.CASCADE,
        blank=True,
        null=True
    )
    task_id = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
        blank=True,
        null=True
    )
    cost_description = models.CharField(max_length=255, )
    cost_amount = models.DecimalField(
        max_digits=19,
        decimal_places=2
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey \
        (User,
         on_delete=models.CASCADE,
         related_name='%(class)s_change_user',
         )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    def __str__(self):
        # NOTE(review): assumes a dollar currency in the display string.
        return str('$' + str(self.cost_amount))
    class Meta:
        db_table = "cost"
class customer(models.Model):
    """A person, optionally attached to an organisation."""
    customer_id = models.AutoField(primary_key=True)
    customer_title = models.ForeignKey(
        'list_of_title',
        on_delete=models.CASCADE,
    )
    customer_first_name = models.CharField(max_length=50)
    customer_last_name = models.CharField(max_length=50)
    customer_email = models.CharField(max_length=200)
    customer_profile_picture = models.ImageField(
        blank=True,
        null=True,
        upload_to='profile_pictures'
    )
    organisation_id = models.ForeignKey(
        'organisation',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    def __str__(self):
        # Display as "<id> - <first> <last>".
        return str(
            str(self.customer_id)
            + ' - '
            + self.customer_first_name
            + ' '
            + self.customer_last_name
        )
    class Meta:
        db_table = "customer"
class customer_campus(models.Model):
    """Join table linking a customer to a campus, with per-campus
    contact numbers for that customer."""
    customer_campus_id = models.AutoField(primary_key=True)
    customer_id = models.ForeignKey(
        'customer',
        on_delete=models.CASCADE,
    )
    campus_id = models.ForeignKey(
        'campus',
        on_delete=models.CASCADE,
    )
    customer_phone = models.CharField(max_length=20)
    customer_fax = models.CharField(max_length=20)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    class Meta:
        db_table = "customer_campus"
class document(models.Model):
    """A stored document: either an uploaded file (served via the
    private-media File_Storage backend) or an external URL.

    Uses a UUID primary key rather than an AutoField.
    """
    document_key = models.UUIDField(
        default=uuid.uuid4,
        editable=False,
        primary_key=True,
    )
    document_description = models.CharField(max_length=255)
    document_url_location = models.TextField(
        # Contains URLS
        null=True,
        blank=True,
    )
    document = models.FileField(
        blank=True,
        null=True,
        storage=File_Storage(),
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE',
    )
    class Meta:
        db_table = "document"
    def __str__(self):
        return str(self.document_description)
class document_folder(models.Model):
    """Join table placing a document inside a folder."""
    document_folder_id = models.AutoField(primary_key=True)
    document_key = models.ForeignKey(
        'document',
        on_delete=models.CASCADE,
    )
    folder_id = models.ForeignKey(
        'folder',
        on_delete=models.CASCADE,
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    class Meta:
        db_table = "document_folder"
class document_permission(models.Model):
    """Grants visibility of a document to one of several possible
    targets (project, task, organisation, customer, opportunity,
    requirement, requirement item, or an individual user)."""
    # NOTE(review): field name has a typo ("permisssion", three s's).
    # Renaming it would require a schema migration, so it is left as-is.
    document_permisssion_id = models.AutoField(primary_key=True)
    document_key = models.ForeignKey(
        'document',
        on_delete=models.CASCADE,
    )
    project_id = models.ForeignKey(
        'project',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    task_id = models.ForeignKey(
        'task',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    organisation_id = models.ForeignKey(
        'organisation',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    customer_id = models.ForeignKey(
        'customer',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    opportunity_id = models.ForeignKey(
        'opportunity',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    requirement = models.ForeignKey(
        'requirement',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    requirement_item = models.ForeignKey(
        'requirement_item',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    user_id = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        null=True,
        blank=True
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    class Meta:
        db_table = "document_permission"
class email_contact(models.Model):
    """Addresses an email_content record: to/cc/bcc customers plus the
    business objects (organisation, project, task, opportunity, quote)
    the email relates to."""
    email_contact_id = models.AutoField(primary_key=True)
    email_content = models.ForeignKey(
        'email_content',
        on_delete=models.CASCADE,
    )
    to_customer = models.ForeignKey(
        'customer',
        related_name='%(class)s_to_customer',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    cc_customer = models.ForeignKey(
        'customer',
        related_name='%(class)s_cc_customer',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    bcc_customer = models.ForeignKey(
        'customer',
        related_name='%(class)s_bcc_customer',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    organisation = models.ForeignKey(
        'organisation',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    project = models.ForeignKey(
        'project',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    task = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    opportunity = models.ForeignKey(
        'opportunity',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    quotes = models.ForeignKey(
        'quote',
        on_delete=models.CASCADE,
        blank=True,
        null=True,
    )
    is_private = models.BooleanField(default=False)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    class Meta:
        db_table = "email_contact"
class email_content(models.Model):
    """Subject and rich-text body of an email; addressing lives in
    email_contact."""
    email_content_id = models.AutoField(primary_key=True)
    email_subject = models.CharField(max_length=255)
    email_content = HTMLField('email_content')
    is_private = models.BooleanField(default=False)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )
    class Meta:
        db_table = "email_content"
class folder(models.Model):
    """Document folder attached to a project or a task; folders nest via
    parent_folder_id (self-referencing FK)."""
    folder_id = models.AutoField(primary_key=True)
    project_id = models.ForeignKey('project', on_delete=models.CASCADE, blank=True, null=True)
    task_id = models.ForeignKey('task', on_delete=models.CASCADE, blank=True, null=True)
    folder_description = models.CharField(max_length=255)
    # Self-reference: NULL means this is a top-level folder.
    parent_folder_id = models.ForeignKey('self', blank=True, null=True, on_delete=models.CASCADE)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.folder_description)

    class Meta:
        db_table = "folder"
class group(models.Model):
    """User group; groups may nest via parent_group (self-referencing FK).
    natural_key() pairs with group_manager.get_by_natural_key."""
    group_id = models.AutoField(primary_key=True)
    group_name = models.CharField(max_length=50, unique=True)
    # NULL parent means a top-level group.
    parent_group = models.ForeignKey("self", on_delete=models.CASCADE, null=True, blank=True)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def natural_key(self):
        """Serialization key consumed by group_manager.get_by_natural_key."""
        return (self.group_id, self.group_name)

    def __str__(self):
        return str(self.group_name)

    class Meta:
        db_table = "group"
class group_manager(models.Manager):
    """Manager providing natural-key lookup for the group model."""

    def get_by_natural_key(self, group_id, group_name):
        """Fetch the group matching the (group_id, group_name) natural key."""
        return self.get(group_id=group_id, group_name=group_name)
class group_permission(models.Model):
    """Associates a permission_set with a group (many-to-many join row)."""
    group_permission_id = models.AutoField(primary_key=True)
    permission_set = models.ForeignKey('permission_set', on_delete=models.CASCADE)
    group = models.ForeignKey('group', on_delete=models.CASCADE)

    def __str__(self):
        return str(self.permission_set)

    class Meta:
        db_table = "group_permission"
class kanban_board(models.Model):
    """A kanban board, optionally linked to a requirement."""
    kanban_board_id = models.AutoField(primary_key=True)
    kanban_board_name = models.CharField(max_length=255)
    requirement = models.ForeignKey('requirement', null=True, blank=True, on_delete=models.CASCADE)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    class Meta:
        db_table = "kanban_board"

    def __str__(self):
        return str(self.kanban_board_name)
class kanban_card(models.Model):
    """A card on a kanban board, positioned by (kanban_level, kanban_column,
    kanban_card_sort_number); optionally backed by a project/task/requirement."""
    kanban_card_id = models.AutoField(primary_key=True)
    kanban_card_text = models.CharField(max_length=255)
    kanban_card_sort_number = models.IntegerField()
    # Position on the board.
    kanban_level = models.ForeignKey('kanban_level', on_delete=models.CASCADE)
    kanban_column = models.ForeignKey('kanban_column', on_delete=models.CASCADE)
    kanban_board = models.ForeignKey('kanban_board', on_delete=models.CASCADE)
    # Optional links to the underlying record the card represents.
    project = models.ForeignKey('project', on_delete=models.CASCADE, null=True, blank=True)
    task = models.ForeignKey('task', on_delete=models.CASCADE, null=True, blank=True)
    requirement = models.ForeignKey('requirement', on_delete=models.CASCADE, null=True, blank=True)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    class Meta:
        db_table = "kanban_card"

    def __str__(self):
        return str(self.kanban_card_text)
class kanban_column(models.Model):
    """A column of a kanban board, ordered by kanban_column_sort_number."""
    kanban_column_id = models.AutoField(primary_key=True)
    kanban_column_name = models.CharField(max_length=255)
    kanban_column_sort_number = models.IntegerField()
    kanban_board = models.ForeignKey('kanban_board', on_delete=models.CASCADE)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    class Meta:
        db_table = "kanban_column"

    def __str__(self):
        return str(self.kanban_column_name)
class kanban_comment(models.Model):
    """Free-text comment attached to a kanban board or a kanban card."""
    kanban_comment_id = models.AutoField(primary_key=True)
    kanban_comment = models.TextField()
    # Either the board or the card (or neither) may be set.
    kanban_board = models.ForeignKey('kanban_board', on_delete=models.CASCADE, null=True, blank=True)
    kanban_card = models.ForeignKey('kanban_card', on_delete=models.CASCADE, null=True, blank=True)
    user_id = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    # NOTE(review): field name is misspelled ("infomation") but renaming would
    # change the DB column, so it is kept as-is.
    user_infomation = models.CharField(max_length=255)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    class Meta:
        db_table = "kanban_comment"

    def __str__(self):
        return str(self.kanban_comment)
class kanban_level(models.Model):
    """A (horizontal) level of a kanban board, ordered by its sort number."""
    kanban_level_id = models.AutoField(primary_key=True)
    kanban_level_name = models.CharField(max_length=255)
    kanban_level_sort_number = models.IntegerField()
    kanban_board = models.ForeignKey('kanban_board', on_delete=models.CASCADE)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    class Meta:
        db_table = "kanban_level"

    def __str__(self):
        return str(self.kanban_level_name)
class list_of_amount_type(models.Model):
    """Reference list of amount types, displayed in list_order."""
    amount_type_id = models.AutoField(primary_key=True)
    amount_type_description = models.CharField(max_length=20)
    list_order = models.IntegerField(unique=True)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user', blank=True, null=True)
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.amount_type_description)

    class Meta:
        db_table = "list_of_amount_type"
        ordering = ['list_order']
class list_of_bug_client(models.Model):
    """Reference list of external bug trackers and the API endpoints used to
    query them."""
    list_of_bug_client_id = models.AutoField(primary_key=True)
    bug_client_name = models.CharField(max_length=50)
    bug_client_api_url = models.CharField(max_length=255)
    # The different API commands
    api_open_bugs = models.CharField(max_length=255)  # Find all open bugs
    api_search_bugs = models.CharField(max_length=255)  # Search command
    api_bug = models.CharField(max_length=255)  # Get that particular bug information - direct link to bug
    # Standard audit columns. (The original declared date_created/date_modified
    # twice; only the last binding survives in a class body, so the duplicate
    # pair was dead code and has been removed.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return str(self.bug_client_name)

    class Meta:
        db_table = "list_of_bug_client"
class list_of_currency(models.Model):
    """Reference list of currencies (long and short descriptions)."""
    currency_id = models.AutoField(primary_key=True)
    currency_description = models.CharField(max_length=20)
    currency_short_description = models.CharField(max_length=4)
    list_order = models.IntegerField(unique=True)
    # Standard audit columns. (Duplicate date_created/date_modified
    # declarations removed - only the last binding survived anyway.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return str(self.currency_description)

    class Meta:
        db_table = "list_of_currency"
class list_of_contact_type(models.Model):
    """Reference list of contact types."""
    contact_type_id = models.AutoField(primary_key=True)
    contact_type = models.CharField(max_length=50)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user', blank=True, null=True)
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.contact_type)

    class Meta:
        db_table = "list_of_contact_type"
class list_of_country(models.Model):
    """Reference list of countries, keyed by the 2-letter country code."""
    country_id = models.CharField(primary_key=True, max_length=2)
    country_name = models.CharField(max_length=50)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user', blank=True, null=True)
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.country_name)

    class Meta:
        db_table = "list_of_country"
class list_of_country_region(models.Model):
    """Reference list of regions (states/provinces) within a country."""
    region_id = models.AutoField(primary_key=True)
    country_id = models.ForeignKey('list_of_country', on_delete=models.CASCADE)
    region_name = models.CharField(max_length=150)
    # e.g. state/province/territory; optional.
    region_type = models.CharField(max_length=80, null=True)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True,
    )
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.region_name)

    class Meta:
        db_table = "list_of_country_region"
class list_of_lead_source(models.Model):
    """Reference list of lead sources for opportunities."""
    lead_source_id = models.AutoField(primary_key=True)
    lead_source_description = models.CharField(max_length=20)
    list_order = models.IntegerField(unique=True)
    # Standard audit columns. (Duplicate date_created/date_modified
    # declarations removed - only the last binding survived anyway.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return str(self.lead_source_description)

    class Meta:
        db_table = "list_of_lead_source"
class list_of_opportunity_stage(models.Model):
    """Reference list of opportunity stages, ordered by list_order."""
    opportunity_stage_id = models.AutoField(primary_key=True)
    opportunity_stage_description = models.CharField(max_length=50)
    # Whole-number percentage (0-100), no decimal places.
    probability_success = models.DecimalField(
        max_digits=3,
        decimal_places=0,
    )
    list_order = models.IntegerField(unique=True)
    # 'TRUE' marks stages that represent a closed opportunity.
    opportunity_closed = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE',
    )
    user_id = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        null=True,
        blank=True
    )
    # Standard audit columns. (Duplicate date_created/date_modified
    # declarations removed - only the last binding survived anyway.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return str(self.opportunity_stage_description)

    class Meta:
        db_table = "list_of_opportunity_stage"
        ordering = ['list_order']
class list_of_quote_stage(models.Model):
    """Reference list of quote stages; flags mark invoice and closed stages."""
    quote_stage_id = models.AutoField(primary_key=True)
    quote_stage = models.CharField(max_length=50, unique=True)
    is_invoice = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')
    quote_closed = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')
    # NOTE(review): auto_created=True marks the field as framework-generated
    # (hidden from migrations serialization) - probably not what was intended
    # here; confirm before changing, as removing it alters migration state.
    sort_order = models.IntegerField(unique=True, auto_created=True)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user', blank=True, null=True)
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.quote_stage)

    class Meta:
        db_table = "list_of_quote_stage"
class list_of_requirement_item_status(models.Model):
    """Reference list of statuses for requirement items."""
    requirement_item_status_id = models.AutoField(primary_key=True)
    requirement_item_status = models.CharField(max_length=100)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user', blank=True, null=True)
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.requirement_item_status)

    class Meta:
        db_table = "list_of_requirement_item_status"
class list_of_requirement_item_type(models.Model):
    """Reference list of types for requirement items."""
    requirement_item_type_id = models.AutoField(primary_key=True)
    requirement_item_type = models.CharField(max_length=100)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user', blank=True, null=True)
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.requirement_item_type)

    class Meta:
        db_table = "list_of_requirement_item_type"
class list_of_requirement_status(models.Model):
    """Reference list of requirement statuses; a flag marks closed statuses."""
    requirement_status_id = models.AutoField(primary_key=True)
    requirement_status = models.CharField(max_length=50)
    # NOTE(review): max_length=10 here vs 5 on sibling flag fields - kept
    # as-is to avoid a schema change.
    requirement_status_is_closed = models.CharField(max_length=10, choices=IS_DELETED_CHOICE, default='FALSE')
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user', blank=True, null=True)
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.requirement_status)

    class Meta:
        db_table = "list_of_requirement_status"
class list_of_requirement_type(models.Model):
    """Reference list of requirement types."""
    requirement_type_id = models.AutoField(primary_key=True)
    requirement_type = models.CharField(max_length=100)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user', blank=True, null=True)
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.requirement_type)

    class Meta:
        db_table = "list_of_requirement_type"
class list_of_tax(models.Model):
    """Reference list of tax rates with an optional display name."""
    tax_id = models.AutoField(primary_key=True)
    tax_amount = models.DecimalField(
        max_digits=6,
        decimal_places=4,
    )
    tax_description = models.CharField(
        max_length=50,
        blank=True,
        null=True,
    )  # In case the customer wants to place a name against the tax
    # Standard audit columns. (Duplicate date_created/date_modified
    # declarations removed - only the last binding survived anyway.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user',
        blank=True,
        null=True,
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return str(self.tax_amount)  # No need to encode as it is a decimal point

    class Meta:
        db_table = "list_of_tax"
class list_of_title(models.Model):
    """Reference list of personal titles (Mr, Ms, Dr, ...)."""
    title_id = models.AutoField(primary_key=True)
    title = models.CharField(max_length=10)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user', blank=True, null=True)
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.title)

    class Meta:
        db_table = "list_of_title"
class opportunity(models.Model):
    """A sales opportunity against an organisation and/or customer, with an
    amount, stage, lead source and expected close date."""
    opportunity_id = models.AutoField(primary_key=True)
    opportunity_name = models.CharField(max_length=255)
    # Typo in the original verbose name ('oppertunity_description') fixed.
    opportunity_description = HTMLField('opportunity_description')
    organisation_id = models.ForeignKey(
        'organisation',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    customer_id = models.ForeignKey(
        'customer',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    currency_id = models.ForeignKey(
        'list_of_currency',
        on_delete=models.CASCADE,
    )
    opportunity_amount = models.DecimalField(
        max_digits=12,
        decimal_places=2
    )  # Turn into a number widget
    amount_type_id = models.ForeignKey(
        'list_of_amount_type',
        on_delete=models.CASCADE
    )
    opportunity_expected_close_date = models.DateTimeField()
    opportunity_stage_id = models.ForeignKey(
        'list_of_opportunity_stage',
        on_delete=models.CASCADE
    )
    opportunity_success_probability = models.IntegerField()  # Between 0% and 100%
    lead_source_id = models.ForeignKey(
        'list_of_lead_source',
        on_delete=models.CASCADE
    )
    user_id = models.ForeignKey(User, on_delete=models.CASCADE)
    # Standard audit columns. (Duplicate date_created/date_modified
    # declarations removed - only the last binding survived anyway.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "opportunities"
class opportunity_permission(models.Model):
    """Access grant for an opportunity: a specific user, a group, or everyone
    (all_user='TRUE')."""
    opportunity_permission_id = models.AutoField(primary_key=True)
    opportunity_id = models.ForeignKey(
        'opportunity',
        on_delete=models.CASCADE
    )
    assigned_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_assigned_user',
        null=True,
        blank=True,
    )
    group_id = models.ForeignKey(
        'group',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    all_user = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE',
    )
    # Standard audit columns. (Duplicate date_created/date_modified
    # declarations removed - only the last binding survived anyway.)
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "opportunity_permission"
class organisation(models.Model):
    """An organisation (company) with contact details and optional picture."""
    organisation_id = models.AutoField(primary_key=True)
    organisation_name = models.CharField(max_length=255)
    organisation_website = models.CharField(max_length=50)
    organisation_email = models.CharField(max_length=100)
    organisation_profile_picture = models.ImageField(blank=True, null=True)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.organisation_name)

    class Meta:
        db_table = "organisation"
class permission_set_manager(models.Manager):
    """Manager providing natural-key lookup for the permission_set model.

    The parameter list is intended to mirror permission_set.natural_key();
    Django's deserializer calls get_by_natural_key(*natural_key_tuple).
    """

    def get_by_natural_key(
        self,
        permission_set_id,
        permission_set_name,
        administration_assign_user_to_group,
        administration_create_group,
        administration_create_permission_set,
        administration_create_user,
        assign_campus_to_customer,
        associate_project_and_task,
        bug,
        bug_client,
        customer,
        email,
        invoice,
        invoice_product,
        kanban,
        kanban_card,
        opportunity,
        organisation,
        organisation_campus,
        project,
        quote,
        requirement,
        requirement_link,
        task,
        tax,
        document,
        contact_history,
        kanban_comment,
        project_history,
        task_history,
    ):
        """Fetch the permission_set whose fields match the natural key."""
        # BUG FIX: the original passed template=template although `template`
        # was never a parameter of this method, which raised NameError (or
        # filtered on an unrelated module-level name) on every call.
        return self.get(
            permission_set_id=permission_set_id,
            permission_set_name=permission_set_name,
            administration_assign_user_to_group=administration_assign_user_to_group,
            administration_create_group=administration_create_group,
            administration_create_permission_set=administration_create_permission_set,
            administration_create_user=administration_create_user,
            assign_campus_to_customer=assign_campus_to_customer,
            associate_project_and_task=associate_project_and_task,
            bug=bug,
            bug_client=bug_client,
            customer=customer,
            email=email,
            invoice=invoice,
            invoice_product=invoice_product,
            kanban=kanban,
            kanban_card=kanban_card,
            opportunity=opportunity,
            organisation=organisation,
            organisation_campus=organisation_campus,
            project=project,
            quote=quote,
            requirement=requirement,
            requirement_link=requirement_link,
            task=task,
            tax=tax,
            document=document,
            contact_history=contact_history,
            kanban_comment=kanban_comment,
            project_history=project_history,
            task_history=task_history,
        )
class permission_set(models.Model):
    """A named bundle of permission levels assigned to groups.

    BASELINE permissions use PERMISSION_LEVEL integers; ADDITIVE permissions
    use PERMISSION_BOOLEAN to grant extras to read-only users.
    """
    objects = permission_set_manager()
    permission_set_id = models.AutoField(primary_key=True)
    permission_set_name = models.CharField(max_length=255, unique=True)
    # BASELINE permission
    administration_assign_user_to_group = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    administration_create_group = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    administration_create_permission_set = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    administration_create_user = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    assign_campus_to_customer = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    associate_project_and_task = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    bug = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    bug_client = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    email = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    invoice = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    invoice_product = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    customer = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    kanban = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    kanban_card = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    opportunity = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    organisation = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    organisation_campus = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    project = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    quote = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    requirement = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    requirement_link = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    task = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    tax = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    template = models.IntegerField(choices=PERMISSION_LEVEL, default=0)
    """
    ADDITIVE permission
    ~~~~~~~~~~~~~~~~~~~~
    Designed to add on extra abilities to those user who have "READ ONLY" for certain modules.
    If a user has the ability to "EDIT" for any of these modules, then this section does not
    need to be populated with data.
    """
    document = models.IntegerField(choices=PERMISSION_BOOLEAN, default=0)
    contact_history = models.IntegerField(choices=PERMISSION_BOOLEAN, default=0)
    kanban_comment = models.IntegerField(choices=PERMISSION_BOOLEAN, default=0)
    project_history = models.IntegerField(choices=PERMISSION_BOOLEAN, default=0)
    task_history = models.IntegerField(choices=PERMISSION_BOOLEAN, default=0)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def natural_key(self):
        """Natural key for serialization.

        BUG FIX: the original returned only 22 fields in an order that did not
        match permission_set_manager.get_by_natural_key, so natural-key
        round-trips could never deserialize. The tuple now mirrors the
        manager's parameter order exactly.
        """
        return (
            self.permission_set_id,
            self.permission_set_name,
            self.administration_assign_user_to_group,
            self.administration_create_group,
            self.administration_create_permission_set,
            self.administration_create_user,
            self.assign_campus_to_customer,
            self.associate_project_and_task,
            self.bug,
            self.bug_client,
            self.customer,
            self.email,
            self.invoice,
            self.invoice_product,
            self.kanban,
            self.kanban_card,
            self.opportunity,
            self.organisation,
            self.organisation_campus,
            self.project,
            self.quote,
            self.requirement,
            self.requirement_link,
            self.task,
            self.tax,
            self.document,
            self.contact_history,
            self.kanban_comment,
            self.project_history,
            self.task_history,
        )

    def __str__(self):
        return str(self.permission_set_name)

    class Meta:
        db_table = "permission_set"
class product_and_service(models.Model):
    """
    For naming convention, product and service will be shorten to
    just product. The product name contains both product and service
    """
    product_id = models.AutoField(primary_key=True)
    product_or_service = models.CharField(max_length=7, choices=PRODUCT_OR_SERVICE)
    # unique=True stops the user inputting the same product twice.
    product_name = models.CharField(max_length=100, unique=True)
    product_part_number = models.CharField(max_length=100, null=True, blank=True)
    product_cost = models.DecimalField(max_digits=19, decimal_places=2)
    product_price = models.DecimalField(max_digits=19, decimal_places=2)
    product_description = models.TextField(blank=True, null=True)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.product_name)

    class Meta:
        db_table = "product_and_service"
class project(models.Model):
    """A project belonging to an organisation (or, failing that, directly to
    a customer), with start/end dates and a status."""
    project_id = models.AutoField(primary_key=True)
    project_name = models.CharField(max_length=255)
    project_description = HTMLField('project_description')
    organisation_id = models.ForeignKey('organisation', on_delete=models.CASCADE, blank=True, null=True)
    # Only fill this field out if there are no organisation
    customer = models.ForeignKey('customer', on_delete=models.CASCADE, blank=True, null=True)
    project_start_date = models.DateTimeField()
    project_end_date = models.DateTimeField()
    project_status = models.CharField(max_length=15, choices=PROJECT_STATUS_CHOICE, default='New')
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.project_name)

    class Meta:
        db_table = "project"
class project_customer(models.Model):
    """Join table between project and customer with an optional description."""
    project_customer_id = models.AutoField(primary_key=True)
    project_id = models.ForeignKey('project', on_delete=models.CASCADE)
    customer_id = models.ForeignKey('customer', on_delete=models.CASCADE)
    customer_description = models.CharField(max_length=255, null=True, blank=True)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    class Meta:
        db_table = "project_customer"
class project_group(models.Model):
    """Join table between project and group (no explicit primary key field;
    Django supplies the implicit `id`)."""
    project_id = models.ForeignKey('project', on_delete=models.CASCADE)
    group_id = models.ForeignKey('group', on_delete=models.CASCADE)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    class Meta:
        db_table = "project_group"
class project_history(models.Model):
    """Rich-text history/comment entry recorded against a project."""
    project_history_id = models.AutoField(primary_key=True)
    project_id = models.ForeignKey('project', on_delete=models.CASCADE)
    user_id = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
    # NOTE(review): field name is misspelled ("infomation") but renaming would
    # change the DB column, so it is kept as-is.
    user_infomation = models.CharField(max_length=255)
    project_history = HTMLField('project_history')
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.project_id)

    class Meta:
        db_table = "project_history"
class project_opportunity(models.Model):
    """Join table between project and opportunity with an optional description."""
    # NOTE(review): PK name is misspelled ("opprtunity") but renaming would
    # change the DB column, so it is kept as-is.
    project_opprtunity_id = models.AutoField(primary_key=True)
    project_id = models.ForeignKey('project', on_delete=models.CASCADE)
    opportunity_id = models.ForeignKey('opportunity', on_delete=models.CASCADE)
    opportunity_description = models.CharField(max_length=255, null=True, blank=True)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    class Meta:
        db_table = "project_opportunity"
class project_stage(models.Model):
    """Join table between project and stage."""
    project_stage_id = models.AutoField(primary_key=True)
    project_id = models.ForeignKey('project', on_delete=models.CASCADE)
    stage_id = models.ForeignKey('stage', on_delete=models.CASCADE)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    class Meta:
        db_table = "project_stage"
class project_task(models.Model):
    """Join table between project and task; db_column keeps the legacy
    column names."""
    project_task_id = models.AutoField(primary_key=True)
    project_id = models.ForeignKey('project', on_delete=models.CASCADE, db_column='project_id')
    task_id = models.ForeignKey('task', on_delete=models.CASCADE, db_column='task_id')
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    class Meta:
        db_table = "project_task"
class quote(models.Model):
    """A quote (or invoice, when is_invoice='TRUE') raised against a project,
    task, opportunity, customer or organisation. quote_uuid provides a stable
    external identifier alongside the auto-increment PK."""
    quote_id = models.AutoField(primary_key=True)
    quote_uuid = models.UUIDField(default=uuid.uuid4, editable=False, primary_key=False, unique=True)
    quote_title = models.CharField(max_length=255)
    quote_valid_till = models.DateTimeField()
    quote_stage_id = models.ForeignKey('list_of_quote_stage', on_delete=models.CASCADE)
    quote_billing_address = models.ForeignKey('campus', on_delete=models.CASCADE, null=True, blank=True)
    is_invoice = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')
    quote_approval_status_id = models.CharField(max_length=10, choices=QUOTE_APPROVAL_STATUS, default='DRAFT')
    quote_terms = HTMLField(null=True, blank=True)
    customer_notes = HTMLField(null=True, blank=True)
    # Optional links to the record this quote was raised against; db_column
    # keeps the legacy column names.
    project_id = models.ForeignKey('project', on_delete=models.CASCADE, db_column='project_id', null=True, blank=True)
    task_id = models.ForeignKey('task', on_delete=models.CASCADE, db_column='task_id', null=True, blank=True)
    opportunity_id = models.ForeignKey('opportunity', on_delete=models.CASCADE, db_column='opportunity_id', null=True, blank=True)
    customer_id = models.ForeignKey('customer', on_delete=models.CASCADE, db_column='customer_id', null=True, blank=True)
    organisation_id = models.ForeignKey('organisation', on_delete=models.CASCADE, db_column='organisation_id', null=True, blank=True)
    # Standard audit columns.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(User, on_delete=models.CASCADE, related_name='%(class)s_change_user')
    is_deleted = models.CharField(max_length=5, choices=IS_DELETED_CHOICE, default='FALSE')

    def __str__(self):
        return str(self.quote_title)

    class Meta:
        db_table = "quote"
class quote_permission(models.Model):
    """Row-level access control for a quote: grants a single user, a group,
    or every user (``all_user`` = TRUE) access to the linked quote."""
    quote_permission_id = models.AutoField(primary_key=True)
    quote = models.ForeignKey(
        'quote',
        on_delete=models.CASCADE
    )
    assigned_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_assigned_user',
        null=True,
        blank=True,
    )
    group_id = models.ForeignKey(
        'group',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    all_user = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE',
    )
    # Audit fields. FIX: date_created/date_modified were declared twice in
    # the original class body; the shadowed duplicates have been removed
    # (the class dict kept only the last pair, so behavior is unchanged).
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "quote_permission"
class quote_product_and_service(models.Model):
    """A single line item on a quote: a product/service with quantity,
    discount (percent or amount), tax and derived totals."""
    quotes_product_and_service_id = models.AutoField(primary_key=True)
    quote = models.ForeignKey(
        'quote',
        on_delete=models.CASCADE,
    )
    product_and_service = models.ForeignKey(
        'product_and_service',
        on_delete=models.CASCADE,
    )
    # Price of the product BEFORE Discounts
    product_price = models.DecimalField(
        max_digits=19,
        decimal_places=2,
    )
    quantity = models.IntegerField()
    product_description = models.CharField(
        max_length=255,
        blank=True,
        null=True,
    )
    product_cost = models.DecimalField(
        max_digits=19,
        decimal_places=2
    )
    # Selects which of the two discount fields below applies.
    discount_choice = models.CharField(
        max_length=10,
        choices=DISCOUNT_CHOICE,
        default='PERCENTAGE',
    )
    discount_percent = models.DecimalField(
        default=0,
        max_digits=5,
        decimal_places=2,
        validators=[MaxValueValidator(100), MinValueValidator(0)]
    )
    discount_amount = models.DecimalField(
        default=0,
        max_digits=19,
        decimal_places=2,
        validators=[MaxValueValidator(1000000000), MinValueValidator(0)]
    )
    # The price of the product AFTER discounts
    sales_price = models.DecimalField(
        default=0,
        max_digits=19,
        decimal_places=2,
        validators=[MaxValueValidator(1000000000), MinValueValidator(0)]
    )
    tax = models.ForeignKey(
        'list_of_tax',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    tax_amount = models.DecimalField(
        max_digits=19,
        decimal_places=2,
        default=0,
    )
    total = models.DecimalField(
        max_digits=19,
        decimal_places=2,
        validators=[MaxValueValidator(99999999999999999999), MinValueValidator(-99999999999999999999)],
    )
    product_note = models.CharField(
        max_length=255,
        null=True,
        blank=True,
    )
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        # FIX: product_description is nullable; the old string concatenation
        # raised TypeError for rows without a description. Fall back to "".
        return "%s| %s" % (self.quotes_product_and_service_id,
                           self.product_description or "")

    class Meta:
        db_table = "quote_product_and_service"
class quote_responsible_customer(models.Model):
    """Link table: which customers are responsible for a quote."""
    quote_responsible_customer_id = models.AutoField(primary_key=True)
    quote_id = models.ForeignKey(
        'quote',
        on_delete=models.CASCADE,
    )
    customer_id = models.ForeignKey(
        'customer',
        on_delete=models.CASCADE,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "quote_responsible_customer"
class quote_template(models.Model):
    """Layout template for rendering quotes: HTML fragments for each section
    plus page orientation and margins."""
    quote_template_id=models.AutoField(primary_key=True)
    quote_template_description=models.CharField(
        max_length=255,
    )
    template_css=models.TextField(
        null=True,
        blank=True,
    )
    header=HTMLField(
        null=True,
        blank=True,
    )
    # To clarify - this is YOUR company's letterhead.
    company_letter_head=HTMLField(
        null=True,
        blank=True,
    )
    payment_terms=models.CharField(
        max_length=255,
        null=True,
        blank=True,
    )
    notes=models.CharField(
        max_length=255,
        null=True,
        blank=True,
    )
    # The organisation's details you are sending the quote to.
    organisation_details=HTMLField(
        null=True,
        blank=True,
    )
    # For product/service lines - stores the order of fields as a variable.
    product_line=models.TextField()
    service_line=models.TextField()
    payment_method=HTMLField(
        null=True,
        blank=True,
    )
    footer=HTMLField(
        null=True,
        blank=True,
    )
    # Landscape/Portrait
    # Margins - left, right, top, bottom, header, footer
    page_layout=models.CharField(
        max_length=50,
        choices=PAGE_LAYOUT,
        default='Landscape',
    )
    margin_left=models.IntegerField(
        default=1,
    )
    margin_right = models.IntegerField(
        default=1,
    )
    margin_top = models.IntegerField(
        default=1,
    )
    margin_bottom = models.IntegerField(
        default=1,
    )
    margin_header = models.IntegerField(
        default=1,
    )
    margin_footer = models.IntegerField(
        default=1,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return self.quote_template_description

    class Meta:
        db_table = "quote_template"
class requirement(models.Model):
    """A top-level requirement with a type and a status, both drawn from
    lookup tables."""
    requirement_id = models.AutoField(primary_key=True)
    requirement_title = models.CharField(
        max_length=255,
    )
    requirement_scope = HTMLField(
        null=True,
        blank=True,
    )
    requirement_type = models.ForeignKey(
        'list_of_requirement_type',
        on_delete=models.CASCADE,
    )
    requirement_status = models.ForeignKey(
        'list_of_requirement_status',
        on_delete=models.CASCADE,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return str(self.requirement_title)

    class Meta:
        db_table = "requirement"
class requirement_item(models.Model):
    """A child item belonging to a requirement, with its own status and
    type lookup tables."""
    requirement_item_id = models.AutoField(primary_key=True)
    requirement_id = models.ForeignKey(
        'requirement',
        on_delete=models.CASCADE,
    )
    requirement_item_title = models.CharField(max_length=255)
    requirement_item_scope = models.TextField(
        null=True,
        blank=True,
    )
    requirement_item_status = models.ForeignKey(
        'list_of_requirement_item_status',
        on_delete=models.CASCADE,
    )
    requirement_item_type = models.ForeignKey(
        'list_of_requirement_item_type',
        on_delete=models.CASCADE,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return str(self.requirement_item_title)

    class Meta:
        db_table = "requirement_item"
class requirement_item_link(models.Model):
    """Link table: connects a requirement item to a project, task or
    organisation (each link optional)."""
    requirement_item_link_id = models.AutoField(primary_key=True)
    requirement_item = models.ForeignKey(
        'requirement_item',
        on_delete=models.CASCADE,
    )
    project_id = models.ForeignKey(
        'project',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    task_id = models.ForeignKey(
        'task',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    organisation_id = models.ForeignKey(
        'organisation',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        # NOTE(review): the table name says "permission" although this model
        # is a link table — looks like a copy/paste slip. Renaming would
        # require a database migration, so it is only flagged here.
        db_table = "requirement_item_permission"
class requirement_link(models.Model):
    """Link table: connects a requirement to a project, task or organisation
    (each link optional)."""
    requirement_link_id = models.AutoField(primary_key=True)
    requirement = models.ForeignKey(
        'requirement',
        on_delete=models.CASCADE,
    )
    project_id = models.ForeignKey(
        'project',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    task_id = models.ForeignKey(
        'task',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    organisation_id = models.ForeignKey(
        'organisation',
        blank=True,
        null=True,
        on_delete=models.CASCADE,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "requirement_link"
class requirement_permission(models.Model):
    """Row-level access control for a requirement: grants a single user, a
    group, or every user (``all_user`` = TRUE) access."""
    requirement_permission_id = models.AutoField(primary_key=True)
    requirement = models.ForeignKey(
        'requirement',
        on_delete=models.CASCADE
    )
    assigned_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_assigned_user',
        null=True,
        blank=True,
    )
    group_id = models.ForeignKey(
        'group',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    all_user = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE',
    )
    # Audit fields. FIX: date_created/date_modified were declared twice in
    # the original class body; the shadowed duplicates have been removed
    # (the class dict kept only the last pair, so behavior is unchanged).
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "requirement_permission"
class stage(models.Model):
    """A named stage belonging to a group (e.g. for kanban-style boards)."""
    stage_id = models.AutoField(primary_key=True)
    group_id = models.ForeignKey(
        'group',
        on_delete=models.CASCADE,
    )
    stage = models.CharField(max_length=45)
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return str(self.stage)

    class Meta:
        db_table = "stage"
class task(models.Model):
    """A task with a date range, an optional organisation and assignee,
    and a status drawn from the same choices as projects."""
    task_id = models.AutoField(primary_key=True)
    task_short_description = models.CharField(max_length=255)
    task_long_description = HTMLField()
    organisation_id = models.ForeignKey(
        'organisation',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    task_start_date = models.DateTimeField()
    task_end_date = models.DateTimeField()
    task_assigned_to = models.ForeignKey(
        User,
        null=True,
        blank=True,
        on_delete=models.CASCADE,
    )
    # Reuses PROJECT_STATUS_CHOICE for task status values.
    task_status = models.CharField(
        max_length=15,
        choices=PROJECT_STATUS_CHOICE,
        default='New'
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return str(self.task_short_description)

    class Meta:
        db_table = "task"
class task_action(models.Model):
    """A free-text action/update logged against a task by a user."""
    task_action_id = models.AutoField(primary_key=True)
    task_id = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
    )
    task_action = models.TextField()
    submitted_by = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "task_action"
class task_customer(models.Model):
    """Link table: connects a customer to a task with an optional
    free-text role description."""
    task_customer_id = models.AutoField(primary_key=True)
    task_id = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
    )
    customer_id = models.ForeignKey(
        'customer',
        on_delete=models.CASCADE,
    )
    customer_description = models.CharField(
        max_length=155,
        null=True,
        blank=True
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "task_customer"
class task_group(models.Model):
    """Link table: assigns a task to a group."""
    task_group_id = models.AutoField(primary_key=True)
    task_id = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
    )
    group_id = models.ForeignKey(
        'group',
        on_delete=models.CASCADE,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "task_group"
class task_history(models.Model):
    """Free-text history entry recorded against a task by a user."""
    task_history_id = models.AutoField(primary_key=True)
    task_id = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
    )
    user_id = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
    )
    # NOTE(review): "infomation" is a typo for "information"; renaming the
    # column would require a migration, so the field is left as-is.
    user_infomation = models.CharField(max_length=255)
    task_history = models.TextField()
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "task_history"
class task_opportunity(models.Model):
    """Link table: connects a task to an opportunity."""
    task_opportunity_id = models.AutoField(primary_key=True)
    task_id = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
    )
    opportunity_id = models.ForeignKey(
        'opportunity',
        on_delete=models.CASCADE,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "task_opportunity"
class to_do(models.Model):
    """A simple checklist item, optionally attached to a project, task or
    opportunity."""
    to_do_id = models.AutoField(primary_key=True)
    to_do = models.CharField(
        max_length=255,
    )
    to_do_completed = models.BooleanField(default=False)
    project = models.ForeignKey(
        'project',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    task = models.ForeignKey(
        'task',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    opportunity = models.ForeignKey(
        'opportunity',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "to_do"
class user_group(models.Model):
    """Membership of a user in a group, with a permission set, an optional
    manager (``report_to``) and a group-leader flag."""
    user_group_id = models.AutoField(primary_key=True)
    username = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
    )
    group = models.ForeignKey(
        'group',
        on_delete=models.CASCADE,
    )
    permission_set = models.ForeignKey(
        'permission_set',
        on_delete=models.CASCADE,
    )
    # The user this member reports to within the group (optional).
    report_to=models.ForeignKey(
        User,
        related_name='report_to',
        on_delete=models.CASCADE,
        null=True,
        blank=True,
    )
    # TRUE/FALSE flag reusing IS_DELETED_CHOICE as a generic boolean.
    group_leader=models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default="FALSE",
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    class Meta:
        db_table = "user_group"
class user_want(models.Model):
    """A user preference: a want category, its free-text value and an
    associated skill level."""
    user_want_id=models.AutoField(
        primary_key=True,
    )
    want_choice=models.CharField(
        max_length=50,
        choices=WANT_CHOICE,
    )
    want_choice_text=models.CharField(
        max_length=50,
    )
    want_skill=models.CharField(
        max_length=50,
        choices=SKILL_CHOICE,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        return self.want_choice_text

    class Meta:
        db_table = "user_want"
class user_weblink(models.Model):
    """A web/social link attached to a user, tagged with its source site."""
    user_weblink_id=models.AutoField(primary_key=True)
    user_weblink_url=models.URLField(max_length=255)
    user_weblink_source=models.CharField(
        max_length=50,
        choices=WEBSITE_SOURCE,
    )
    # Audit fields.
    date_created = models.DateTimeField(auto_now_add=True)
    date_modified = models.DateTimeField(auto_now=True)
    change_user = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='%(class)s_change_user'
    )
    is_deleted = models.CharField(
        max_length=5,
        choices=IS_DELETED_CHOICE,
        default='FALSE'
    )

    def __str__(self):
        # BUG FIX: previously returned self.user_weblinks_url (extra "s"),
        # which raised AttributeError; the declared field is user_weblink_url.
        return self.user_weblink_url

    class Meta:
        db_table = "user_weblink"
# Copyright (c) Chris Choy (chrischoy@ai.stanford.edu).
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
# Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
# of the code.
import torch
import torch.nn as nn
from torch.optim import SGD
import MinkowskiEngine as ME
from MinkowskiEngine.modules.resnet_block import BasicBlock, Bottleneck
from examples.resnet import ResNetBase
class BasicBlockShallow(nn.Module):
    """Residual block variant using 1x1-kernel Minkowski convolutions
    (conv-bn-relu, conv-bn, skip add, relu). Drop-in for BasicBlock via the
    same ``expansion`` attribute."""
    expansion = 1

    def __init__(self,
                 inplanes,
                 planes,
                 stride=1,
                 dilation=1,
                 downsample=None,
                 bn_momentum=0.1,
                 dimension=-1):
        """:param downsample: optional module applied to the input to match
            the residual's shape/stride.
        :param dimension: spatial dimension of the sparse tensors; must be
            set explicitly (> 0)."""
        super(BasicBlockShallow, self).__init__()
        assert dimension > 0

        self.conv1 = ME.MinkowskiConvolution(
            inplanes, planes, kernel_size=1, stride=stride, dilation=dilation, dimension=dimension)
        self.norm1 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
        self.conv2 = ME.MinkowskiConvolution(
            planes, planes, kernel_size=1, stride=1, dilation=dilation, dimension=dimension)
        self.norm2 = ME.MinkowskiBatchNorm(planes, momentum=bn_momentum)
        self.relu = ME.MinkowskiReLU(inplace=True)
        self.downsample = downsample

    def forward(self, x):
        """Standard pre-activation-free residual forward pass."""
        residual = x

        out = self.conv1(x)
        out = self.norm1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.norm2(out)

        # Project the input when stride/channels changed.
        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
class MinkUNetBase(ResNetBase):
    """Sparse Minkowski UNet: four stride-2 encoder stages followed by four
    transpose-conv decoder stages, each decoder stage concatenating the
    skip tensor from the matching encoder resolution.

    Subclasses configure ``BLOCK`` (residual block class), ``LAYERS``
    (blocks per stage) and ``PLANES`` (channels per stage).
    """
    BLOCK = None  # residual block class; must be set by a subclass
    DILATIONS = (1, 1, 1, 1, 1, 1, 1, 1)
    LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)
    # NOTE: the redundant ``PLANES = None`` placeholder that was immediately
    # shadowed by this default has been removed.
    PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
    INIT_DIM = 32
    OUT_TENSOR_STRIDE = 1

    # To use the model, must call initialize_coords before forward pass.
    # Once data is processed, call clear to reset the model before calling
    # initialize_coords
    def __init__(self, in_channels=3, out_channels=20, bn_momentum=0.1, D=3):
        # Set bn_momentum first: ResNetBase.__init__ invokes
        # network_initialization, which reads it.
        self.bn_momentum = bn_momentum
        ResNetBase.__init__(self, in_channels, out_channels, D)

    def network_initialization(self, in_channels, out_channels, D):
        """Build all layers; invoked by ResNetBase.__init__."""
        # Output of the first conv concated to conv6
        self.inplanes = self.INIT_DIM
        self.conv0p1s1 = ME.MinkowskiConvolution(
            in_channels, self.inplanes, kernel_size=5, dimension=D)
        self.bn0 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)

        # --- encoder: stride-2 conv + residual stage, four times ---
        self.conv1p1s2 = ME.MinkowskiConvolution(
            self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
        self.bn1 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)

        self.block1 = self._make_layer(self.BLOCK, self.PLANES[0],
                                       self.LAYERS[0])

        self.conv2p2s2 = ME.MinkowskiConvolution(
            self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
        self.bn2 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)

        self.block2 = self._make_layer(self.BLOCK, self.PLANES[1],
                                       self.LAYERS[1])

        self.conv3p4s2 = ME.MinkowskiConvolution(
            self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
        self.bn3 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)

        self.block3 = self._make_layer(self.BLOCK, self.PLANES[2],
                                       self.LAYERS[2])

        self.conv4p8s2 = ME.MinkowskiConvolution(
            self.inplanes, self.inplanes, kernel_size=2, stride=2, dimension=D)
        self.bn4 = ME.MinkowskiBatchNorm(self.inplanes, momentum=self.bn_momentum)

        self.block4 = self._make_layer(self.BLOCK, self.PLANES[3],
                                       self.LAYERS[3])

        # --- decoder: transpose conv + concat skip + residual stage ---
        # After each concat, inplanes accounts for the skip channels.
        self.convtr4p16s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes, self.PLANES[4], kernel_size=2, stride=2, dimension=D)
        self.bntr4 = ME.MinkowskiBatchNorm(self.PLANES[4], momentum=self.bn_momentum)

        self.inplanes = self.PLANES[4] + self.PLANES[2] * self.BLOCK.expansion
        self.block5 = self._make_layer(self.BLOCK, self.PLANES[4],
                                       self.LAYERS[4])
        self.convtr5p8s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes, self.PLANES[5], kernel_size=2, stride=2, dimension=D)
        self.bntr5 = ME.MinkowskiBatchNorm(self.PLANES[5], momentum=self.bn_momentum)

        self.inplanes = self.PLANES[5] + self.PLANES[1] * self.BLOCK.expansion
        self.block6 = self._make_layer(self.BLOCK, self.PLANES[5],
                                       self.LAYERS[5])
        self.convtr6p4s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes, self.PLANES[6], kernel_size=2, stride=2, dimension=D)
        self.bntr6 = ME.MinkowskiBatchNorm(self.PLANES[6], momentum=self.bn_momentum)

        self.inplanes = self.PLANES[6] + self.PLANES[0] * self.BLOCK.expansion
        self.block7 = self._make_layer(self.BLOCK, self.PLANES[6],
                                       self.LAYERS[6])
        self.convtr7p2s2 = ME.MinkowskiConvolutionTranspose(
            self.inplanes, self.PLANES[7], kernel_size=2, stride=2, dimension=D)
        self.bntr7 = ME.MinkowskiBatchNorm(self.PLANES[7], momentum=self.bn_momentum)

        self.inplanes = self.PLANES[7] + self.INIT_DIM
        self.block8 = self._make_layer(self.BLOCK, self.PLANES[7],
                                       self.LAYERS[7])

        # 1x1 classification head.
        self.final = ME.MinkowskiConvolution(
            self.PLANES[7] * self.BLOCK.expansion,
            out_channels,
            kernel_size=1,
            bias=True,
            dimension=D)
        self.relu = ME.MinkowskiReLU(inplace=True)

    def forward(self, x):
        """Encoder-decoder pass; returns per-point logits at tensor stride 1."""
        out = self.conv0p1s1(x)
        out = self.bn0(out)
        out_p1 = self.relu(out)

        out = self.conv1p1s2(out_p1)
        out = self.bn1(out)
        out = self.relu(out)
        out_b1p2 = self.block1(out)

        out = self.conv2p2s2(out_b1p2)
        out = self.bn2(out)
        out = self.relu(out)
        out_b2p4 = self.block2(out)

        out = self.conv3p4s2(out_b2p4)
        out = self.bn3(out)
        out = self.relu(out)
        out_b3p8 = self.block3(out)

        # tensor_stride=16
        out = self.conv4p8s2(out_b3p8)
        out = self.bn4(out)
        out = self.relu(out)
        out = self.block4(out)

        # tensor_stride=8
        out = self.convtr4p16s2(out)
        out = self.bntr4(out)
        out = self.relu(out)

        out = ME.cat(out, out_b3p8)
        out = self.block5(out)

        # tensor_stride=4
        out = self.convtr5p8s2(out)
        out = self.bntr5(out)
        out = self.relu(out)

        out = ME.cat(out, out_b2p4)
        out = self.block6(out)

        # tensor_stride=2
        out = self.convtr6p4s2(out)
        out = self.bntr6(out)
        out = self.relu(out)

        out = ME.cat(out, out_b1p2)
        out = self.block7(out)

        # tensor_stride=1
        out = self.convtr7p2s2(out)
        out = self.bntr7(out)
        out = self.relu(out)

        out = ME.cat(out, out_p1)
        out = self.block8(out)

        return self.final(out)

    @staticmethod
    def add_argparse_args(parent_parser):
        """Register model CLI options (currently none) and return the parser."""
        parser = parent_parser.add_argument_group("MinkUNet")
        # parser.add_argument("--in_channels", type=int, default=3)
        # parser.add_argument("--out_channels", type=int, default=32)
        return parent_parser

    def convert_sync_batchnorm(self):
        """Return a copy of this model with BatchNorm layers converted to
        MinkowskiSyncBatchNorm.

        BUG FIX: the previous implementation rebound the local name ``self``
        and implicitly returned None, so the caller's model was never
        converted. Use as ``model = model.convert_sync_batchnorm()``.
        """
        return ME.MinkowskiSyncBatchNorm.convert_sync_batchnorm(self)
# Depth variants: each fixes the residual block type and blocks-per-stage.
class MinkUNet14(MinkUNetBase):
    # One residual block per stage.
    BLOCK = BasicBlock
    LAYERS = (1, 1, 1, 1, 1, 1, 1, 1)


class MinkUNet18(MinkUNetBase):
    # Two residual blocks per stage (same as the base-class default).
    BLOCK = BasicBlock
    LAYERS = (2, 2, 2, 2, 2, 2, 2, 2)


class MinkUNet34(MinkUNetBase):
    # ResNet34-style encoder depths (2, 3, 4, 6).
    BLOCK = BasicBlock
    LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)


class MinkUNet34Shallow(MinkUNetBase):
    # Same depths as MinkUNet34 but with 1x1-kernel BasicBlockShallow.
    BLOCK = BasicBlockShallow
    LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)


class MinkUNet50(MinkUNetBase):
    # Bottleneck-block versions of the 34/101-layer depth patterns.
    BLOCK = Bottleneck
    LAYERS = (2, 3, 4, 6, 2, 2, 2, 2)


class MinkUNet101(MinkUNetBase):
    BLOCK = Bottleneck
    LAYERS = (2, 3, 4, 23, 2, 2, 2, 2)
# Channel-width (PLANES) variants of the depth classes above.
# FIX: a stray " |" table-dump artifact trailing the final line (which made
# the module unparsable) has been removed; the code is otherwise unchanged.
class MinkUNet14A(MinkUNet14):
    PLANES = (32, 64, 128, 256, 128, 128, 96, 96)


class MinkUNet14B(MinkUNet14):
    PLANES = (32, 64, 128, 256, 128, 128, 128, 128)


class MinkUNet14C(MinkUNet14):
    PLANES = (32, 64, 128, 256, 192, 192, 128, 128)


class MinkUNet14D(MinkUNet14):
    PLANES = (32, 64, 128, 256, 384, 384, 384, 384)


class MinkUNet18A(MinkUNet18):
    PLANES = (32, 64, 128, 256, 128, 128, 96, 96)


class MinkUNet18B(MinkUNet18):
    PLANES = (32, 64, 128, 256, 128, 128, 128, 128)


class MinkUNet18D(MinkUNet18):
    PLANES = (32, 64, 128, 256, 384, 384, 384, 384)


class MinkUNet34A(MinkUNet34):
    PLANES = (32, 64, 128, 256, 256, 128, 64, 64)


class MinkUNet34B(MinkUNet34):
    PLANES = (32, 64, 128, 256, 256, 128, 64, 32)


class MinkUNet34C(MinkUNet34):
    PLANES = (32, 64, 128, 256, 256, 128, 96, 96)


class MinkUNet34CShallow(MinkUNet34Shallow):
    PLANES = (32, 64, 128, 256, 256, 128, 96, 96)
# -*- coding: utf-8 -*-
import numpy as np
import pytest
# Optional-dependency guard: the test below needs mgmetis + mpi4py; when
# either is missing, has_mpi stays False and the test is skipped.
try:
    from mgmetis.parmetis import part_geom
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    has_mpi = True
except (ImportError, ModuleNotFoundError):
    has_mpi = False
@pytest.mark.skipif(not has_mpi or comm.size != 2, reason="invalid parallel env")
def test_partgeom():
    """Smoke-test ParMETIS geometric partitioning on exactly 2 MPI ranks."""
    try:
        part = part_geom(np.random.rand(100, 3))
        # With 2 ranks the largest part index seen across all ranks must be 1.
        assert np.max(np.asarray(comm.allgather(part))) == 1
    except BaseException as e:
        import sys

        print(e, file=sys.stderr, flush=True)
        # Abort the whole MPI job so a failing rank cannot hang the others
        # in the collective allgather above.
        comm.Abort(1)
# Form implementation generated from reading ui file 'wiznewtext.ui'
#
# Created: Mon Dec 23 20:49:57 2002
# by: The PyQt User Interface Compiler (pyuic)
#
# WARNING! All changes made in this file will be lost!
import sys
from qt import *
class wizNewText(QWizard):
    def __init__(self,parent = None,name = None,modal = 0,fl = 0):
        """Build the six wizard pages (record data, parsing options, raw
        text, parse tree, project selection, done) and wire tab order and
        label buddies. pyuic-generated: regenerate from wiznewtext.ui
        rather than editing by hand."""
        QWizard.__init__(self,parent,name,modal,fl)

        if not name:
            self.setName("wizNewText")

        # --- page 1: basic descriptive data for the text ---
        self.pageRecord = QWidget(self,"pageRecord")
        pageRecordLayout = QGridLayout(self.pageRecord,1,1,11,6,"pageRecordLayout")

        self.txtTitle = QLineEdit(self.pageRecord,"txtTitle")
        self.txtTitle.setMaxLength(255)

        pageRecordLayout.addWidget(self.txtTitle,0,3)

        self.txtDescription = QMultiLineEdit(self.pageRecord,"txtDescription")

        pageRecordLayout.addWidget(self.txtDescription,6,3)

        self.txtUrl = QLineEdit(self.pageRecord,"txtUrl")

        pageRecordLayout.addWidget(self.txtUrl,1,3)

        self.cmbLanguage = QComboBox(0,self.pageRecord,"cmbLanguage")

        pageRecordLayout.addWidget(self.cmbLanguage,2,3)

        self.cmbRecording = QComboBox(0,self.pageRecord,"cmbRecording")

        pageRecordLayout.addWidget(self.cmbRecording,3,3)

        self.cmbScan = QComboBox(0,self.pageRecord,"cmbScan")

        pageRecordLayout.addWidget(self.cmbScan,4,3)

        self.txtTranscriptionDate = QLineEdit(self.pageRecord,"txtTranscriptionDate")
        self.txtTranscriptionDate.setMaxLength(12)

        pageRecordLayout.addWidget(self.txtTranscriptionDate,5,3)

        self.lblDataHelp = QLabel(self.pageRecord,"lblDataHelp")
        self.lblDataHelp.setMaximumSize(QSize(175,32767))
        self.lblDataHelp.setAlignment(QLabel.WordBreak | QLabel.AlignTop | QLabel.AlignLeft)

        pageRecordLayout.addMultiCellWidget(self.lblDataHelp,0,6,0,0)

        self.lblUrl = QLabel(self.pageRecord,"lblUrl")
        self.lblUrl.setSizePolicy(QSizePolicy(5,5,0,0,self.lblUrl.sizePolicy().hasHeightForWidth()))

        pageRecordLayout.addWidget(self.lblUrl,1,2)

        self.lblLanguage = QLabel(self.pageRecord,"lblLanguage")

        pageRecordLayout.addWidget(self.lblLanguage,2,2)

        self.lblScan = QLabel(self.pageRecord,"lblScan")

        pageRecordLayout.addWidget(self.lblScan,4,2)

        self.lblRecording = QLabel(self.pageRecord,"lblRecording")

        pageRecordLayout.addWidget(self.lblRecording,3,2)

        self.lblTitle = QLabel(self.pageRecord,"lblTitle")

        pageRecordLayout.addWidget(self.lblTitle,0,2)

        self.lblDescription = QLabel(self.pageRecord,"lblDescription")
        self.lblDescription.setAlignment(QLabel.AlignTop | QLabel.AlignLeft)

        pageRecordLayout.addWidget(self.lblDescription,6,2)

        self.lblTranscriptionDate = QLabel(self.pageRecord,"lblTranscriptionDate")

        pageRecordLayout.addWidget(self.lblTranscriptionDate,5,2)

        self.Line1 = QFrame(self.pageRecord,"Line1")
        self.Line1.setFrameShape(QFrame.VLine)
        self.Line1.setFrameShadow(QFrame.Sunken)
        self.Line1.setFrameShape(QFrame.VLine)

        pageRecordLayout.addMultiCellWidget(self.Line1,0,6,1,1)
        self.addPage(self.pageRecord,"")

        # --- page 2: parsing options (regexp vs XML, lookups) ---
        self.pageParsing = QWidget(self,"pageParsing")
        pageParsingLayout = QGridLayout(self.pageParsing,1,1,11,6,"pageParsingLayout")

        self.txtStreamRegExp = QLineEdit(self.pageParsing,"txtStreamRegExp")

        pageParsingLayout.addWidget(self.txtStreamRegExp,1,4)

        self.txtMorphemeRegExp = QLineEdit(self.pageParsing,"txtMorphemeRegExp")

        pageParsingLayout.addWidget(self.txtMorphemeRegExp,3,4)

        self.txtElementRegExp = QLineEdit(self.pageParsing,"txtElementRegExp")

        pageParsingLayout.addWidget(self.txtElementRegExp,2,4)

        self.lblParseHelp = QLabel(self.pageParsing,"lblParseHelp")
        self.lblParseHelp.setMaximumSize(QSize(175,32767))
        self.lblParseHelp.setAlignment(QLabel.WordBreak | QLabel.AlignTop | QLabel.AlignLeft)

        pageParsingLayout.addMultiCellWidget(self.lblParseHelp,0,7,0,0)

        self.Line2 = QFrame(self.pageParsing,"Line2")
        self.Line2.setFrameShape(QFrame.VLine)
        self.Line2.setFrameShadow(QFrame.Sunken)
        self.Line2.setFrameShape(QFrame.VLine)

        pageParsingLayout.addMultiCellWidget(self.Line2,0,7,1,2)

        self.chkLexLookup = QCheckBox(self.pageParsing,"chkLexLookup")
        self.chkLexLookup.setChecked(1)

        pageParsingLayout.addMultiCellWidget(self.chkLexLookup,5,5,3,4)

        self.chkElmtLookup = QCheckBox(self.pageParsing,"chkElmtLookup")
        self.chkElmtLookup.setChecked(1)

        pageParsingLayout.addMultiCellWidget(self.chkElmtLookup,4,4,3,4)

        self.grpParser = QGroupBox(self.pageParsing,"grpParser")
        self.grpParser.setColumnLayout(0,Qt.Vertical)
        self.grpParser.layout().setSpacing(6)
        self.grpParser.layout().setMargin(11)
        grpParserLayout = QGridLayout(self.grpParser.layout())
        grpParserLayout.setAlignment(Qt.AlignTop)

        self.rdbRegExp = QRadioButton(self.grpParser,"rdbRegExp")
        self.rdbRegExp.setChecked(1)

        grpParserLayout.addWidget(self.rdbRegExp,0,0)

        # XML parsing is not implemented in the GUI; the radio stays disabled.
        self.rdbXML = QRadioButton(self.grpParser,"rdbXML")
        self.rdbXML.setEnabled(0)

        grpParserLayout.addWidget(self.rdbXML,1,0)

        pageParsingLayout.addMultiCellWidget(self.grpParser,0,0,3,4)

        self.lblStreamRegExp = QLabel(self.pageParsing,"lblStreamRegExp")

        pageParsingLayout.addWidget(self.lblStreamRegExp,1,3)

        self.lblElementRegexp = QLabel(self.pageParsing,"lblElementRegexp")

        pageParsingLayout.addWidget(self.lblElementRegexp,2,3)

        self.lblMorphemeRegExp = QLabel(self.pageParsing,"lblMorphemeRegExp")

        pageParsingLayout.addWidget(self.lblMorphemeRegExp,3,3)
        self.addPage(self.pageParsing,"")

        # --- page 3: raw text entry with load button and progress bar ---
        self.pageText = QWidget(self,"pageText")
        pageTextLayout = QGridLayout(self.pageText,1,1,11,6,"pageTextLayout")

        self.bnLoad = QPushButton(self.pageText,"bnLoad")

        pageTextLayout.addWidget(self.bnLoad,0,1)

        self.lblTextHelp = QLabel(self.pageText,"lblTextHelp")
        self.lblTextHelp.setAlignment(QLabel.WordBreak | QLabel.AlignTop | QLabel.AlignLeft)

        pageTextLayout.addWidget(self.lblTextHelp,0,0)

        self.progressParsing = QProgressBar(self.pageText,"progressParsing")
        self.progressParsing.setFrameShape(QProgressBar.Panel)

        pageTextLayout.addMultiCellWidget(self.progressParsing,2,2,0,1)

        self.txtRawText = QMultiLineEdit(self.pageText,"txtRawText")

        pageTextLayout.addMultiCellWidget(self.txtRawText,1,1,0,1)
        self.addPage(self.pageText,"")

        # --- page 4: parse-tree review ---
        self.pageParseTree = QWidget(self,"pageParseTree")
        pageParseTreeLayout = QGridLayout(self.pageParseTree,1,1,11,6,"pageParseTreeLayout")

        self.lsvParseTree = QListView(self.pageParseTree,"lsvParseTree")
        self.lsvParseTree.addColumn(self.tr("Type"))
        self.lsvParseTree.addColumn(self.tr("Text"))
        self.lsvParseTree.addColumn(self.tr("Lexeme"))
        self.lsvParseTree.setRootIsDecorated(1)

        pageParseTreeLayout.addWidget(self.lsvParseTree,0,2)

        self.lblParseTreeHelp = QLabel(self.pageParseTree,"lblParseTreeHelp")
        self.lblParseTreeHelp.setMaximumSize(QSize(175,32767))
        self.lblParseTreeHelp.setAlignment(QLabel.WordBreak | QLabel.AlignTop | QLabel.AlignLeft)

        pageParseTreeLayout.addWidget(self.lblParseTreeHelp,0,0)

        self.Line4 = QFrame(self.pageParseTree,"Line4")
        self.Line4.setFrameShape(QFrame.VLine)
        self.Line4.setFrameShadow(QFrame.Sunken)
        self.Line4.setFrameShape(QFrame.VLine)

        pageParseTreeLayout.addWidget(self.Line4,0,1)
        self.addPage(self.pageParseTree,"")

        # --- page 5: choose target projects ---
        self.pageProjects = QWidget(self,"pageProjects")
        pageProjectsLayout = QGridLayout(self.pageProjects,1,1,11,6,"pageProjectsLayout")

        self.lsbProjects = QListBox(self.pageProjects,"lsbProjects")
        self.lsbProjects.setSelectionMode(QListBox.Extended)

        pageProjectsLayout.addWidget(self.lsbProjects,0,2)

        self.lblProjectChooser = QLabel(self.pageProjects,"lblProjectChooser")
        self.lblProjectChooser.setMaximumSize(QSize(175,32767))
        self.lblProjectChooser.setAlignment(QLabel.WordBreak | QLabel.AlignTop | QLabel.AlignLeft)

        pageProjectsLayout.addWidget(self.lblProjectChooser,0,0)

        self.Line6 = QFrame(self.pageProjects,"Line6")
        self.Line6.setFrameShape(QFrame.VLine)
        self.Line6.setFrameShadow(QFrame.Sunken)
        self.Line6.setFrameShape(QFrame.VLine)

        pageProjectsLayout.addWidget(self.Line6,0,1)
        self.addPage(self.pageProjects,"")

        # --- page 6: completion ---
        self.pageReady = QWidget(self,"pageReady")
        pageReadyLayout = QGridLayout(self.pageReady,1,1,11,6,"pageReadyLayout")

        self.lblDone = QLabel(self.pageReady,"lblDone")
        lblDone_font = QFont(self.lblDone.font())
        lblDone_font.setPointSize(24)
        lblDone_font.setBold(1)
        lblDone_font.setItalic(1)
        self.lblDone.setFont(lblDone_font)
        self.lblDone.setAlignment(QLabel.AlignCenter)

        pageReadyLayout.addWidget(self.lblDone,0,0)

        self.lblDoneHelp = QLabel(self.pageReady,"lblDoneHelp")
        self.lblDoneHelp.setAlignment(QLabel.WordBreak | QLabel.AlignTop | QLabel.AlignLeft)

        pageReadyLayout.addWidget(self.lblDoneHelp,1,0)
        self.addPage(self.pageReady,"")

        # Apply translatable texts, final size, tab order and label buddies.
        self.languageChange()

        self.resize(QSize(649,416).expandedTo(self.minimumSizeHint()))

        self.setTabOrder(self.txtTitle,self.txtUrl)
        self.setTabOrder(self.txtUrl,self.cmbLanguage)
        self.setTabOrder(self.cmbLanguage,self.cmbRecording)
        self.setTabOrder(self.cmbRecording,self.cmbScan)
        self.setTabOrder(self.cmbScan,self.txtTranscriptionDate)
        self.setTabOrder(self.txtTranscriptionDate,self.txtDescription)
        self.setTabOrder(self.txtDescription,self.rdbRegExp)
        self.setTabOrder(self.rdbRegExp,self.rdbXML)
        self.setTabOrder(self.rdbXML,self.txtStreamRegExp)
        self.setTabOrder(self.txtStreamRegExp,self.txtElementRegExp)
        self.setTabOrder(self.txtElementRegExp,self.txtMorphemeRegExp)
        self.setTabOrder(self.txtMorphemeRegExp,self.chkElmtLookup)
        self.setTabOrder(self.chkElmtLookup,self.chkLexLookup)
        self.setTabOrder(self.chkLexLookup,self.bnLoad)
        self.setTabOrder(self.bnLoad,self.txtRawText)
        self.setTabOrder(self.txtRawText,self.lsvParseTree)
        self.setTabOrder(self.lsvParseTree,self.lsbProjects)

        self.lblUrl.setBuddy(self.txtUrl)
        self.lblLanguage.setBuddy(self.cmbLanguage)
        self.lblScan.setBuddy(self.cmbScan)
        self.lblRecording.setBuddy(self.cmbRecording)
        self.lblTitle.setBuddy(self.txtTitle)
        self.lblDescription.setBuddy(self.txtDescription)
def languageChange(self):
    """Set every user-visible string of the wizard (caption, labels,
    what's-this help, page titles).

    Qt Designer generates this method so that re-running it retranslates
    the UI; all literals therefore go through self.tr() for i18n.
    """
    self.setCaption(self.tr("Create a new text"))
    QWhatsThis.add(self.txtTranscriptionDate,self.tr("This is the date the text was first transcribed."))
    # Page 1: basic descriptive data.
    self.lblDataHelp.setText(self.tr("Enter the basic descriptive data for this text: the title, a freeform description, links to sound recordings and manuscript scans, the basic language of the text and the date the text was transcribed.\n"
    "\n"
    "The URL can be a link to a website connected with the text. \n"
    "\n"
    "The language is the basic language of the text: individual phrases and words can be marked to be in another language."))
    self.lblUrl.setText(self.tr("&URL"))
    self.lblLanguage.setText(self.tr("&Language"))
    self.lblScan.setText(self.tr("&Scan"))
    self.lblRecording.setText(self.tr("&Recording"))
    self.lblTitle.setText(self.tr("&Title"))
    self.lblDescription.setText(self.tr("&Description"))
    self.lblTranscriptionDate.setText(self.tr("Date"))
    self.setTitle(self.pageRecord,self.tr("Basic data"))
    # Page 2: parser configuration. Default regular expressions for the
    # three parse levels (phrase/stream, word/element, morpheme).
    # NOTE(review): the "\\12" inside the stream character class looks odd
    # (an octal/backreference escape in a character class?) -- confirm the
    # intended pattern before changing it.
    self.txtStreamRegExp.setText(self.tr("[.!?\\12]"))
    self.txtMorphemeRegExp.setText(self.tr("[.]"))
    self.txtElementRegExp.setText(self.tr("[ ,:;]"))
    self.lblParseHelp.setText(self.tr("New textual data can be entered in one of two ways: by parsing plain text using regular expressions that look for punctuation of special marks.\n"
    "\n"
    "Regular expressions can parse a text on three levels: phrases, words and morphemes. \n"
    "\n"
    "After parsing, the texts must be tagged. \n"
    "\n"
    "If you want to parse XML text, you must currently use\n"
    "the command line."))
    self.chkLexLookup.setText(self.tr("Lookup words and morphemes in the lexicon."))
    self.chkElmtLookup.setText(self.tr("Lookup words and morphemes in other texts"))
    self.grpParser.setTitle(self.tr("Parser"))
    self.rdbRegExp.setText(self.tr("Parse using regular expressions"))
    self.rdbXML.setText(self.tr("Parse XML-markup text"))
    self.lblStreamRegExp.setText(self.tr("Stream regular expression"))
    self.lblElementRegexp.setText(self.tr("Word regular expression"))
    self.lblMorphemeRegExp.setText(self.tr("Morpheme regular expression"))
    self.setTitle(self.pageParsing,self.tr("Determine parsing"))
    # Page 3: raw text entry.
    self.bnLoad.setText(self.tr("Load from File"))
    self.lblTextHelp.setText(self.tr("Either paste a text (plain text in utf-8 Unicode encoding), or open a text with the Load from File button."))
    self.setTitle(self.pageText,self.tr("Enter the prepared text"))
    # Page 4: parse-tree review.
    self.lsvParseTree.header().setLabel(0,self.tr("Type"))
    self.lsvParseTree.header().setLabel(1,self.tr("Text"))
    self.lsvParseTree.header().setLabel(2,self.tr("Lexeme"))
    self.lblParseTreeHelp.setText(self.tr("You can check the result of the parsing in this treeview. It shows the streams, words and morphemes, and lexemes and morphemes found by these items.\n"
    "\n"
    "If you want to make corrections, please edit the prepared text and parse again."))
    self.setTitle(self.pageParseTree,self.tr("Check parse tree"))
    # Page 5: project selection.
    self.lblProjectChooser.setText(self.tr("Pick all projects this text is relevant to. You can make an extended selection by pressing CTRL and clicking on a project."))
    self.setTitle(self.pageProjects,self.tr("Choose projects"))
    # Final page.
    self.lblDone.setText(self.tr("Done!"))
    self.lblDoneHelp.setText(self.tr("The new text has been parsed; you can now press Finish, and it will be entered in the Kura database. From that moment you can edit and analyze it in the Kura desktop client."))
    self.setTitle(self.pageReady,self.tr("Ready"))
# Stand-alone test harness: create a (Qt3-style) application, show the
# wizard and run the event loop until the last window closes.
# NOTE(review): relies on `sys`, the qt wildcard imports and `wizNewText`
# being defined earlier in this file -- confirm, they are outside this view.
if __name__ == "__main__":
    a = QApplication(sys.argv)
    QObject.connect(a,SIGNAL("lastWindowClosed()"),a,SLOT("quit()"))
    w = wizNewText()
    a.setMainWidget(w)
    w.show()
    a.exec_loop()
|
# Generated by Django 3.0.3 on 2020-09-02 14:13
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: rename the ``BookCategory`` model to
    ``DBBookCategory`` in the ``rcsystem`` app.

    ``RenameModel`` keeps the underlying table data; only the model (and
    its default table name) changes.
    """

    # Must run after the migration that deleted the old Post model.
    dependencies = [
        ('rcsystem', '0047_delete_post'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='BookCategory',
            new_name='DBBookCategory',
        ),
    ]
|
from queue import Empty, Queue
import threading
from flask import (
Flask, request, Response, send_file, render_template
)
import torch
torch.backends.cudnn.benchmark = True
from torchvision import transforms, utils
from util import *
import os
import glob
import copy
import time
from io import BytesIO
from distutils.util import strtobool
import numpy as np
from model import *
from e4e_projection import projection as e4e_projection
app = Flask(__name__)

# ---- model setup (runs once at import time) --------------------------------
latent_dim = 512
device = "cuda" if torch.cuda.is_available() else "cpu"


def _load_checkpoint(path):
    """torch.load a checkpoint onto the CPU regardless of where it was saved;
    weights are moved to `device` explicitly where needed."""
    return torch.load(path, map_location=lambda storage, loc: storage)


# Base FFHQ StyleGAN2 generator; fine-tuned style weights are swapped into
# `generator` per request (see run()).
original_generator = Generator(1024, latent_dim, 8, 2).to(device)
ckpt = _load_checkpoint('models/stylegan2-ffhq-config-f.pt')
original_generator.load_state_dict(ckpt['g_ema'], strict=False)
mean_latent = original_generator.mean_latent(10000)

generator = copy.deepcopy(original_generator)

# Input images are resized to the generator resolution and scaled to [-1, 1].
transform = transforms.Compose(
    [
        transforms.Resize((1024, 1024)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ]
)
toPILImage = transforms.ToPILImage()

# Every style checkpoint lives in models/<name>.pt; load them data-driven
# instead of fifteen near-identical torch.load lines.
_PRETRAINED_NAMES = (
    "arcane_caitlyn",
    "arcane_caitlyn_preserve_color",
    "arcane_jinx_preserve_color",
    "arcane_jinx",
    "arcane_multi_preserve_color",
    "arcane_multi",
    "art",
    "disney_preserve_color",
    "disney",
    "jojo_preserve_color",
    "jojo",
    "jojo_yasuho_preserve_color",
    "jojo_yasuho",
    "supergirl_preserve_color",
    "supergirl",
)
pretrained_models = {
    name: _load_checkpoint(os.path.join('models', f'{name}.pt'))
    for name in _PRETRAINED_NAMES
}

# Request queue shared with the background batching worker.
requestsQueue = Queue()
BATCH_SIZE = 1
CHECK_INTERVAL = 0.1
def handle_requests_by_batch():
    """Worker loop: pull queued requests in batches of BATCH_SIZE and run them.

    Runs forever on a background thread.  Each request dict gains an
    'output' key when done, which the waiting HTTP handler polls for.
    """
    while True:
        batch = []
        # Collect a full batch, waking every CHECK_INTERVAL seconds so the
        # loop never blocks indefinitely on an empty queue.
        while len(batch) < BATCH_SIZE:
            try:
                batch.append(requestsQueue.get(timeout=CHECK_INTERVAL))
            except Empty:
                continue
        # Loop variable renamed from `request`, which shadowed the
        # `flask.request` import.
        for req in batch:
            req['output'] = run(req['input'][0], req['input'][1])
def run(file, pretrained):
    """Stylize one uploaded face image with the selected JoJoGAN style.

    :param file: uploaded file object (werkzeug FileStorage-like: exposes
        .filename, .save() and .content_type).
    :param pretrained: key into the module-level `pretrained_models` dict.
    :return: BytesIO with the encoded result image on success, or the
        sentinel strings "Detected Error" (face alignment failed) /
        "error" (any other failure) -- checked by the HTTP handler.
    """
    try:
        # Persist the upload so the alignment/projection helpers can read it.
        filepath = f'input/{file.filename}'
        file.save(filepath)
        name = strip_path_extension(filepath)+'.pt'
        # Align/crop the face, then project it into StyleGAN latent space.
        aligned_face = align_face(filepath)
        my_w = e4e_projection(aligned_face, name, device).unsqueeze(0)
        # NOTE(review): this mutates the module-global `generator`; that is
        # only safe while a single worker thread calls run() -- confirm
        # before raising BATCH_SIZE or adding workers.
        ckpt = pretrained_models[pretrained]
        generator.load_state_dict(ckpt["g"], strict=False)
        # Fixed seed for reproducible output.
        seed = 3000
        torch.manual_seed(seed)
        with torch.no_grad():
            generator.eval()
            my_sample = generator(my_w, input_is_latent=True)
        # Map from [-1, 1] back to displayable range.
        # NOTE(review): make_grid's `range=` parameter was renamed
        # `value_range=` in newer torchvision -- confirm the pinned version.
        result = utils.make_grid(my_sample, normalize=True, range=(-1, 1))
        result = np.squeeze(result)
        result_image = toPILImage(result)
        # Encode with the same image format the client uploaded.
        buffer_out = BytesIO()
        result_image.save(buffer_out, format=f'{file.content_type.split("/")[-1]}')
        buffer_out.seek(0)
        # Clean up the uploaded image and intermediate files it produced.
        deleteFileList = glob.glob(f"input/{file.filename.split('.')[0]}.*")
        for deleteFile in deleteFileList:
            os.remove(deleteFile)
        return buffer_out
    except AssertionError as e:
        # Presumably raised by align_face when no face is found -- confirm.
        return "Detected Error"
    except Exception as e:
        return "error"
threading.Thread(target=handle_requests_by_batch).start()
@app.route('/jojogan', methods=['POST'])
def jojogan():
    """HTTP entry point: queue a stylization request and wait for its result.

    Expects multipart form fields 'file' (image upload) and 'pretrained'
    (style name).  Returns the stylized image, or an error status.
    """
    # Simple back-pressure: reject when more than one batch is already queued.
    if requestsQueue.qsize() > BATCH_SIZE:
        return Response('Too Many Requests', status=429)

    try:
        file = request.files['file']
        pretrained = request.form['pretrained']
    except KeyError:
        # Was a bare `except:`, which also swallowed unrelated bugs; missing
        # form fields raise (a subclass of) KeyError.
        return Response("Empty Field", status=400)

    if pretrained not in pretrained_models:
        return Response("Model Not Found", status=404)

    req = {
        'input': [file, pretrained]
    }
    requestsQueue.put(req)

    # Poll until the worker thread fills in 'output'.
    while 'output' not in req:
        time.sleep(CHECK_INTERVAL)

    io = req['output']
    if io == "error":
        return Response('Server Error', status=500)
    elif io == "Detected Error":
        # Fixed typo in the user-facing message ("Deteced" -> "Detected").
        return Response('Face Not Detected, try another face image', status=500)

    return send_file(io, mimetype=file.content_type)
@app.route('/health', methods=['GET'])
def health_check():
    """Liveness probe: always answers the literal string "ok"."""
    status = "ok"
    return status
@app.route('/', methods=['GET'])
def main():
    """Serve the demo front-end page."""
    page = render_template('index.html')
    return page
if __name__ == "__main__":
    # Port should be an int; the original passed the string "5000", which
    # werkzeug happens to tolerate but type checkers and some WSGI servers
    # do not.
    app.run(host="0.0.0.0", port=5000)
"""
LYNGUA LANGUAGE LEARNING EXPERIENCE
Copyright (c) 2021 by SilentByte <https://silentbyte.com/>
"""
# Deprecated and planning to delete
import json
import base64
import logging
import requests
from typing import List
from dataclasses import dataclass
from lyngua_api import settings
log = logging.getLogger(__name__)
AUDIO_MAX_LENGTH = 60
class LynguaError(Exception):
    """Base error raised by the Lyngua speech helpers."""
def _dict_to_base64_json(data: dict) -> str:
return base64.b64encode(json.dumps(data).encode('utf-8')).decode('utf-8')
def _request_speech_token() -> str:
    """Fetch a short-lived Azure Cognitive Services bearer token.

    Raises requests.HTTPError on a non-2xx response.
    """
    headers = {'Ocp-Apim-Subscription-Key': settings.AZURE_SPEECH_API_KEY}
    response = requests.post(
        "https://eastus.api.cognitive.microsoft.com/sts/v1.0/issuetoken",
        headers=headers,
    )
    response.raise_for_status()
    return response.content.decode('utf-8')
@dataclass
class Word:
    """One recognized word with its display form and timing in seconds."""
    text: str        # lexical form returned by the recognizer
    display: str     # display form (currently identical to `text`)
    offset: float    # start time within the audio, in seconds
    duration: float  # length of the word, in seconds
@dataclass
class ScoredWord:
    """A recognized word plus its pronunciation-assessment result."""
    text: str        # lexical form returned by the recognizer
    display: str     # display form (currently identical to `text`)
    offset: float    # start time within the audio, in seconds
    duration: float  # length of the word, in seconds
    accuracy: float  # AccuracyScore rescaled from 0-100 to 0-1
    error: str       # Azure ErrorType, lower-cased (e.g. "none", "omission")
def speech_to_text(wav_data: bytes) -> List[Word]:
    """Transcribe en-US WAV audio via the Azure Speech-to-Text REST API.

    :param wav_data: raw WAV file bytes.
    :return: one :class:`Word` per token of the top ("NBest[0]") hypothesis.
    :raises LynguaError: if the service reports anything but Success.
    :raises requests.HTTPError: on a non-2xx HTTP response.
    """
    token = _request_speech_token()
    response = requests.post(
        url="https://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?language=en-US&format=detailed&wordLevelTimestamps=true&profanity=raw",
        data=wav_data,
        headers={
            'Content-Type': 'audio/wav',
            'Authorization': f'Bearer {token}',
            'Accept': 'application/json',
            'Ocp-Apim-Subscription-Key': settings.AZURE_SPEECH_API_KEY,
        },
    )
    response.raise_for_status()

    speech_data: dict = response.json()
    log.info('Speech to Text API returned data', extra=speech_data)

    if speech_data.get('RecognitionStatus') != 'Success':
        raise LynguaError('Speech API returned an error')

    return [
        Word(
            text=w['Word'],
            display=w['Word'],  # TODO: Figure out a way to extract/correlate words with punctuation.
            # Offset/Duration arrive in 100-nanosecond ticks; convert to seconds.
            offset=w['Offset'] / 10_000_000,
            duration=w['Duration'] / 10_000_000,
        )
        for w in speech_data['NBest'][0]['Words']
    ]
def score_pronunciation(reference_text: str, wav_data: bytes) -> List[ScoredWord]:
    """Score the pronunciation of *wav_data* against *reference_text*.

    Uses the Azure pronunciation-assessment extension of the STT REST API:
    the assessment config travels base64(JSON)-encoded in the
    ``Pronunciation-Assessment`` header.

    :param reference_text: the text the speaker was supposed to say.
    :param wav_data: raw WAV file bytes (en-US speech).
    :return: one :class:`ScoredWord` per token of the top hypothesis.
    :raises LynguaError: if the service reports anything but Success.
    :raises requests.HTTPError: on a non-2xx HTTP response.
    """
    token = _request_speech_token()

    # NOTE(review): Granularity is 'FullText' yet per-word AccuracyScore /
    # ErrorType values are read below -- confirm this granularity still
    # yields word-level fields.
    assessment = {
        'ReferenceText': reference_text,
        'GradingSystem': 'HundredMark',
        'Granularity': 'FullText',
        'Dimension': 'Comprehensive',
    }

    response = requests.post(
        url="https://eastus.stt.speech.microsoft.com/speech/recognition/conversation/cognitiveservices/v1?language=en-US&format=detailed&wordLevelTimestamps=true&profanity=raw",
        data=wav_data,
        headers={
            'Content-Type': 'audio/wav',
            'Authorization': f'Bearer {token}',
            'Accept': 'application/json',
            'Ocp-Apim-Subscription-Key': settings.AZURE_SPEECH_API_KEY,
            'Pronunciation-Assessment': _dict_to_base64_json(assessment),
        },
    )
    response.raise_for_status()

    score_data: dict = response.json()
    log.info('Pronunciation Score API returned data', extra=score_data)

    if score_data.get('RecognitionStatus') != 'Success':
        raise LynguaError('Speech API returned an error')

    return [
        ScoredWord(
            text=w['Word'],
            display=w['Word'],  # TODO: Figure out a way to extract/correlate words with punctuation.
            # Offset/Duration arrive in 100-nanosecond ticks; convert to seconds.
            offset=w['Offset'] / 10_000_000,
            duration=w['Duration'] / 10_000_000,
            # Rescale 0-100 HundredMark score to 0-1.
            accuracy=w['AccuracyScore'] / 100,
            error=w['ErrorType'].lower(),
        )
        for w in score_data['NBest'][0]['Words']
    ]
|
from __future__ import absolute_import

import numpy
import scipy.ndimage
import scipy.ndimage.filters

from six.moves import range
def kirsch(image):
    """Apply the Kirsch compass edge operator to a 2-D image.

    The 3x3 Kirsch kernel (three 5s on one side, five -3s elsewhere, zero
    centre) is rotated through all 8 compass directions; the result is the
    per-pixel maximum response over those rotations.

    :param image: 2-D numpy array.
    :return: float array of the same shape with the maximum edge response
        (always >= 0, since the accumulator starts at zero).
    """
    convolution_mask = [5, -3, -3, -3, -3, -3, 5, 5]
    derivatives = numpy.zeros(image.shape)
    kernel = numpy.zeros((3, 3), image.dtype)
    # kindex maps each border cell of the 3x3 kernel to its position on the
    # circular mask; -1 marks the (always zero) centre cell.
    kindex = numpy.array([[0, 1, 2], [7, -1, 3], [6, 5, 4]])
    for _ in range(len(convolution_mask)):
        kernel[kindex >= 0] = numpy.array(convolution_mask)[kindex[kindex >= 0]]
        # scipy.ndimage.convolve: the old scipy.ndimage.filters.convolve
        # alias lived in a namespace deprecated since SciPy 1.8 and removed
        # in recent releases; the function itself is unchanged.
        derivatives = numpy.maximum(
            derivatives, scipy.ndimage.convolve(image, kernel)
        )
        # Rotate the mask one step for the next compass direction.
        convolution_mask = convolution_mask[-1:] + convolution_mask[:-1]
    return derivatives
|
# -*- coding: utf-8 -*-
# This file as well as the whole tspreprocess package are licenced under the MIT licence (see the LICENCE.txt)
# Maximilian Christ (maximilianchrist.com), 2017
from __future__ import absolute_import, division
import numpy as np
from tsfresh import extract_features
from tsfresh.utilities.dataframe_functions import _normalize_input_to_internal_representation
# todo: maybe compress is not the right term? because we are losing information
# todo: add support for generic numpy methods as aggregation function
# todo: add support for custom aggregation functions
def compress(ts, compression_functions, interval_length, column_id, column_sort, column_kind, column_value):
    """
    This method compresses time series by applying a compression function on bins. Then the values of the compression
    function over the bins are returned as a new, compressed time series.

    This decreasing the memory footprint of the time series. E.g. by applying a singular compression function on chunks
    of size 10, the time series is compressed by a factor 10.

    It is also possible to use multiple compression functions.

    The time series container ts must be in one of the formats that are supported by the tsfresh package.

    :param ts: The pandas.DataFrame with the time series to compute the features for, or a dictionary of pandas.DataFrames.
    :type ts: pandas.DataFrame or dict
    :param compression_functions: mapping from feature calculator names to parameters. See tsfresh documentation
    :type compression_functions: dict
    :param interval_length: the length of each bin to which the aggregation functions are applied
    :type interval_length: int
    :param column_id: The name of the id column to group by.
    :type column_id: str
    :param column_sort: The name of the sort column.
    :type column_sort: str
    :param column_kind: The name of the column keeping record on the kind of the value.
    :type column_kind: str
    :param column_value: The name for the column keeping the value itself.
    :type column_value: str

    :return: pandas.DataFrame with one row per (id, bin), sorted by id and bin,
        containing one column per compression function and kind.
    """
    dd, column_id, column_kind, column_value = \
        _normalize_input_to_internal_representation(ts, column_id, column_sort, column_kind, column_value)

    def create_bins(v):
        # Label each sample with its bin index 0..n_bins-1, interval_length
        # samples per bin.  int() keeps the labels (and the final "bin"
        # column) integral instead of the float labels np.arange(float)
        # used to produce.
        n_bins = int(np.ceil(len(v) / interval_length))
        return np.repeat(np.arange(n_bins), interval_length)[:len(v)]

    # Encode the bin into the id so extract_features aggregates per bin.
    dd[column_id] = dd[column_id].apply(str) + "_bin_" + \
        dd.groupby([column_id, column_kind])[column_value].transform(create_bins).apply(str)

    dd = extract_features(dd,
                          column_id=column_id,
                          column_value=column_value,
                          column_kind=column_kind,
                          default_fc_parameters=compression_functions)

    dd.columns = [x.replace("__", "_") for x in dd.columns]
    dd.columns = [x.replace("feature", "map") for x in dd.columns]
    dd.reset_index(drop=False, inplace=True)

    # Split the synthetic "<id>_bin_<n>" index back into id and bin number.
    # Fixed: the bin line hard-coded dd["id"] instead of the normalized
    # column_id, and used eval() where int() is both safe and sufficient.
    ids = dd[column_id].str.split("_bin_").apply(lambda s: s[0])
    bin_number = dd[column_id].str.split("_bin_").apply(lambda s: int(s[1]))

    dd[column_id] = ids
    dd["bin"] = bin_number

    return dd.sort_values(by=[column_id, "bin"])
# todo: add references to SCADA sources
# add unit tests for this method
def compress_SCADA_like(ts, interval_length, column_id, column_sort, column_kind, column_value):
    """
    Takes a tsfresh compatible time series container and performs compression by calculating max, min, mean and
    variance of each time series.

    This is a common compression technique for SCADA (Supervisory Control and Data Acquisition) systems, deployed in
    Industrial environments.

    All parameters are passed through to :func:`compress`; see there for details.
    """
    # Fixed: the keyword was misspelled "intervall_lenght", so every call
    # raised TypeError (compress has no such parameter).
    return compress(ts=ts,
                    compression_functions={"minimum": None, "maximum": None, "mean": None, "variance": None},
                    interval_length=interval_length,
                    column_id=column_id, column_sort=column_sort, column_kind=column_kind, column_value=column_value)
def make_SAX():
    """Placeholder for a future SAX (Symbolic Aggregate approXimation)
    compression method.

    # TODO: not implemented yet.
    """
    pass
#!/usr/bin/env python
# Pre-processing script that reads Geant4 output from a particular source
# and saves it to a uniform-format FITS file encoding the energy deposited
# in WFI pixels for each primary particle. This is for the MIT-generated
# Geant4 output, which should be sorted into directories for different
# runs. The directory name is expected as an argument, and it must contain
# sets of files of the following naming convention:
#
# *_EvtLog_*.gdat
# *_StepLog_*.gdat
#
# The EvtLog file contains summary information of each generated primary,
# and the StepLog file contains the energy deposition steps.
#
# EDM Tue Mar 9 13:11:35 EST 2021
# Added SECPARTYPE, which records the particle type of the particle that
# entered the detector and ultimately led to the energy deposition. This
# was mostly added for the MIT version, and so for OU we just copy
# PARTYPE to the SECPARTYPE column, since (I think) that's what it is
# anyway. Otherwise the event finder would fail.
#
# EDM Mon Feb 8 14:36:42 EST 2021
# Updated pixel ACTX/ACTY offsets for new OU data (vs. the data provided
# by Jonathan Keelan in 2018). Also now assumes same 512x512 active region
# for each DEPFET as the MIT version, which comes from
# the document 'DEPFET_sensitive_area_v7.pdf'.
#
# EDM Thu Jan 14 14:46:40 EST 2021
# Now ignores pixels with no signal.
#
# EDM Fri Jan 8 14:44:29 EST 2021
# First version to read OU format of Geant4 output. Adapted from
# 'convert_to_fits_mit.py'. Changed output column EDEP to ENERGY.
# Probably will break stuff.
import numpy as np
import astropy
import os, sys, glob, re
from astropy.table import Table
from astropy.io import fits
import pandas as pd
# Require at least one Geant4 input file on the command line.
num_args = len(sys.argv) - 1
if num_args < 1:
    sys.exit(f"Usage: {sys.argv[0]} <path/to/Geant4 input file> ...")
# functions
def match(regex: str, string: str):
    """Search *string* for *regex*; return the re.Match, or None if absent.

    re.search caches compiled patterns internally, so this is equivalent to
    compiling first and then searching.
    """
    return re.search(regex, string)
def wfits(data, fitsfile, ow=True, hdr=None, hdrcomments=None):
    """Write *data* to a FITS file.

    :param data: a tuple ``(names, columns)`` written as a binary table HDU,
        or an array written as the primary HDU.  Fixed: a dict used to fall
        through a silent ``pass`` and crash later with NameError on the
        unbound ``hdu``; it is now rejected explicitly.
    :param fitsfile: output path.
    :param ow: overwrite an existing file if True.
    :param hdr: optional dict of header keyword -> value (table HDU only).
    :param hdrcomments: optional dict of header keyword -> comment string.
    """
    if isinstance(data, dict):
        raise TypeError("wfits() does not support dict input")
    elif isinstance(data, tuple):
        t = Table(data[1], names = data[0])
        hdu = fits.table_to_hdu(t)
        if hdr:
            for key, val in hdr.items():
                hdu.header[key] = val
        if hdrcomments:
            for key, val in hdrcomments.items():
                hdu.header.comments[key] = val
    else:
        hdu = fits.PrimaryHDU()
        hdu.data = data
    hdu.writeto(fitsfile, overwrite = ow)
# These things are Geant4-source-specific, so should be
# written to header of rawpix file. Probably.
sphere_radius = 70. # radius of boundary sphere in cm
#numprims_gen = int(1e6) # num. primaries per run
# now calculated by rounding off largest primid to one sig digit
array_size = int(1e7)   # pre-allocated length of the per-pixel output arrays
primary_type = 'proton' # particle type of the generated primaries
# Updated EDM Mon Feb 8 14:24:12 EST 2021
# Active DEPFET regions are the following (same as MIT):
#       ACTX            ACTY
# D     59:570          52:563
# C     59:570          610:1121
# A     632:1143        52:563
# B     632:1143        610:1121
# Current OU coords run: X=-592:+582, Y=-807:+397, inclusive.
# This appears to contain 1175x1205 130-µm pixels, as expected
# from 'DEPFET_sensitive_area_v7.pdf'. So I think I just need
# to shift them so LLC is at X,Y=0,0, and then filter out the
# same active DEPFET parts as in the MIT version.
x_offset = 592
y_offset = 807
# Old OU offsets (for J. Keelan data from 2018):
#x_offset = 513
#y_offset = 513
# Not used but left here for safe-keeping:
#imgsize = 1027
#actmin = 0
#actmax = 1026
#xydep_min = -513
#xydep_max = 513
# dictionary of particle types
# these are the same codes as the PDG in MPE data
# and are used for both primary type and particle
# (primary or secondary) that deposits energy;
# can be bit-wise added for multiple secondaries
# depositing in the same pixel for the same primary
ptypes = { 'other' : 0,
           'proton' : 1,
           'electron' : 2,
           'gamma' : 4,
           'e+' : 8,
           'pi-' : 16,
           'pi+' : 32,
           'neutron' : 64,
           'alpha' : 128 }
# loop through input filenames
for filename in sys.argv[1:] :
    # NOTE(review): several f-strings below contain the literal text
    # "(unknown)" with no placeholder -- they look like they were meant to
    # interpolate {filename}; confirm against the original script.
    if not re.search('.*[0-9]+_detector0(\.gz)*$', filename) :
        print(f'### Error: file (unknown) does not look like an OU Geatn4 file, skipping.')
        continue
    if not os.path.isfile(filename) :
        print(f'### Error reading file (unknown), skipping.')
        continue
    # Run id is the numeric part of '<runid>_detector0[.gz]'.
    path = os.path.dirname(filename)
    this_runid = int( re.sub('.*?([0-9]+)_detector0(\.gz)*$', r'\1', filename) )
    outfile = os.path.join(path, f'rawpix_{this_runid}.fits')
    print(f'### Converting run {this_runid}.')
    print(f'### Input file (unknown).')
    print(f'### Output file {outfile}.')
    # there should be 4 Geant4 input files, one for each quadrant
    infiles = glob.glob(f"{path}/{this_runid}_detector?")
    print(f'{infiles}')
    if len(infiles) != 4:
        print (f"### Found something other than 4 datafiles for run {this_runid}, skipping.")
        continue
    # allocate output arrays, which are pixel-based
    pix_primid = np.zeros(array_size, dtype=np.uint32)
    pix_detid = np.zeros(array_size, dtype=np.uint8) + 255 # just to make sure this gets set
    pix_actx = np.zeros(array_size, dtype=np.int16)
    pix_acty = np.zeros(array_size, dtype=np.int16)
    pix_energy = np.zeros(array_size, dtype=np.float32)
    pix_partype = np.zeros(array_size, dtype=np.uint16)
    pix_primtype = np.zeros(array_size, dtype=np.uint8) + ptypes[primary_type]
    pix_runid = np.zeros(array_size, dtype=np.uint16) + this_runid
    this_pix = 0   # next free slot in the pixel arrays
    # loop through the four quadrant data files for this run
    # and combine all four, since single primary can produce signal
    # in multiple quadrants
    for infile in infiles:
        print(f"### Reading {infile}")
        rc = match('[0-9]+_detector([0-9])', infile) #extracts the detector name
        this_detector = int(rc.group(1))
        ptype = {}
        with open(infile, 'r') as IN:
            # step through the input file and accumulate primaries
            for line in IN:
                if match('^\s*#', line): #skip comments
                    continue
                if match('^\s*$', line): #skip blank lines
                    continue
                if not match(',', line): #could be if ',' not in line
                    continue
                fields = line.rstrip().split(',')
                # if the first column is a string, then this is a particle line
                if match('[a-zA-Z]', fields[0]):
                    # retain the primary for this interaction
                    this_primid = int(float(fields[1]))
                    # particle type is a dict so that the pixel-specific read can pick them up;
                    # they are indexed by the secondary particle ID, and it doesn't matter if the
                    # particle ID is re-used from primary to primary, since this will reset it
                    ptype[int(fields[2])] = fields[0]
                # if the first column is a number, then this is a pixel hit line
                else:
                    # skip it if it's outside the 512x512 region of a quad
                    this_x, this_y = int(fields[0]), int(fields[1])
                    #if this_x<xydep_min or this_y<xydep_min or this_x>xydep_max or this_y>xydep_max:
                    #    continue
                    # skip it if the energy deposited is zero
                    this_energy = float(fields[2])
                    if (this_energy <= 0) :
                        continue
                    # Shift OU coordinates so the lower-left corner is (0, 0).
                    pix_actx[this_pix] = this_x + x_offset
                    pix_acty[this_pix] = this_y + y_offset
                    pix_energy[this_pix] = this_energy
                    pix_detid[this_pix] = this_detector
                    pix_primid[this_pix] = this_primid
                    # ptype is a dict of particle type strings indexed by the id
                    # ptypes is (constant) dict of my own particle type IDs indexed
                    # by the string (confused yet?)
                    this_tmp = ptype.get(int(fields[3]))
                    #print(f'{this_primid}, {fields[3]}, {this_tmp}')
                    pix_partype[this_pix] = ptypes.get( ptype.get(int(fields[3]), 'other'), 0 )
                    this_pix += 1
    # done primary-by-primary processing
    # eliminate signal outside of the 512-pixel active region of each DEPFET,
    # as defined above
    #       ACTX            ACTY
    # D     59:570          52:563
    # C     59:570          610:1121
    # A     632:1143        52:563
    # B     632:1143        610:1121
    # remove outer ring (this also drops the unused zero-filled tail of the
    # pre-allocated arrays, since ACTX=0 fails the cut)
    indx = (pix_actx>=59) & (pix_actx<=1143) & (pix_acty>=52) & (pix_acty<=1121)
    pix_primid = pix_primid[indx]
    pix_detid = pix_detid[indx]
    pix_actx = pix_actx[indx]
    pix_acty = pix_acty[indx]
    pix_energy = pix_energy[indx]
    pix_partype = pix_partype[indx]
    pix_primtype = pix_primtype[indx]
    pix_runid = pix_runid[indx]
    # remove the gaps
    indx = ( (pix_actx<=570) | (pix_actx>=632) ) & ( (pix_acty<=563) | (pix_acty>=610) )
    pix_primid = pix_primid[indx]
    pix_detid = pix_detid[indx]
    pix_actx = pix_actx[indx]
    pix_acty = pix_acty[indx]
    pix_energy = pix_energy[indx]
    pix_partype = pix_partype[indx]
    pix_primtype = pix_primtype[indx]
    pix_runid = pix_runid[indx]
    # lop off unused parts of arrays
    # NOTE(review): after the region filters above the arrays are already
    # no longer than this_pix, so these slices are no-ops, and `numpix`
    # (= this_pix) over-counts the pixels actually kept -- the printed and
    # reported pixel count below reflects the pre-filter total.  Confirm
    # whether the truncation was meant to happen BEFORE the filtering.
    numpix = this_pix
    pix_primid = pix_primid[0:numpix]
    pix_detid = pix_detid[0:numpix]
    pix_actx = pix_actx[0:numpix]
    pix_acty = pix_acty[0:numpix]
    pix_energy = pix_energy[0:numpix]
    pix_partype = pix_partype[0:numpix]
    pix_primtype = pix_primtype[0:numpix]
    pix_runid = pix_runid[0:numpix]
    # theoretically, the number of primaries generated for a run
    # should be the highest primid rounded to one significant digit
    def RoundToSigDig (x, a) :
        # Round x to `a` significant digits.
        return round( x, (-int(np.floor(np.log10(abs(x)))) + (a-1)) )
    numprims_gen = int( RoundToSigDig(pix_primid.max(), 1) )
    # done loop through quadrant data files for this run
    uniq_primid = np.unique(pix_primid)
    numprims_interact = uniq_primid.size
    pct_interact = 100. * numprims_interact / numprims_gen
    print(f"### Run {this_runid}: generated {numprims_gen} primaries.")
    print(f"### Run {this_runid}: found {numprims_interact} primaries ({pct_interact}%) that interacted.")
    print(f"### Run {this_runid}: found {numpix} pixels with deposited energy")
    # add header keywords
    hdr = {
        'SPH_RAD' : sphere_radius,
        'NPRI_GEN' : numprims_gen,
        'NPRI_INT' : numprims_interact }
    hdrcomments = {
        'SPH_RAD' : 'Radius of Geant4 source sphere in cm',
        'NPRI_GEN' : 'Number of primaries generated',
        'NPRI_INT' : 'Number of primaries producing signal' }
    # make a table and save it to a FITS HDU
    # (PARTYPE is written twice on purpose: for OU data SECPARTYPE is just a
    # copy of PARTYPE, per the header notes at the top of this script)
    wfits( (['PRIMID', 'DETID', 'ACTX', 'ACTY', 'ENERGY', 'PARTYPE', 'SECPARTYPE', 'PRIMTYPE', 'RUNID'],
            [pix_primid.astype(np.uint32),
             pix_detid.astype(np.uint8),
             pix_actx.astype(np.int16),
             pix_acty.astype(np.int16),
             pix_energy.astype(np.single),
             pix_partype.astype(np.uint8),
             pix_partype.astype(np.uint8),
             pix_primtype.astype(np.uint8),
             pix_runid.astype(np.uint64)]),
           outfile, hdr=hdr, hdrcomments=hdrcomments)
# NOTE(review): indentation was lost in this copy; exit() is placed at top
# level (after the filename loop) -- if it were inside the loop only the
# first input file would ever be converted.  Confirm.
exit()
|
import os
from elasticsearchutils import searchByStatus, buildElasticSearchUrl
# Default endpoint is a hard-coded internal AWS ELB host.
# NOTE(review): this should come from deployment config only; consider
# failing fast when ES_ENDPOINT is unset instead of baking in the URL.
ES_ENDPOINT = os.environ.get('ES_ENDPOINT', 'http://aa41f5f30e2f011e8bde30674acac93e-1024276836.us-west-2.elb.amazonaws.com:9200')
ES_INDEX = 'documents'

# One URL for _search queries, one for update operations.
es_url_status_search = buildElasticSearchUrl(ES_ENDPOINT, ES_INDEX)
es_url_status_update = buildElasticSearchUrl(ES_ENDPOINT, ES_INDEX, isSearch=False)

# Fixed: searchByStatus was called with the *update* URL while
# es_url_status_search went unused; a status search must hit the search URL.
tt = searchByStatus(es_url_status_search, "initial", True, 10)
from __future__ import annotations
import random
import numpy as np
import pytest
from torchvision import datasets # type: ignore[import]
from edutorch.typing import NPArray, NPIntArray
@pytest.fixture(autouse=True)
def _set_random_seed() -> None:
    """Reseed Python's and NumPy's RNGs before every test for determinism."""
    for seed_fn in (random.seed, np.random.seed):
        seed_fn(0)
@pytest.fixture(scope="session")
def fashion_mnist(
    num_train: int = 100, num_test: int = 10
) -> tuple[NPArray, NPIntArray, NPArray, NPIntArray]:
    """Session-scoped fixture: a small, preprocessed Fashion-MNIST subset.

    Downloads the dataset into data/ on first use, keeps the first
    `num_train` training and `num_test` test samples, subtracts the
    training mean image from both splits, and flattens each image to a
    1-D feature vector.

    Returns (X_train, y_train, X_test, y_test).

    NOTE(review): pytest does not inject values for extra fixture
    parameters, so num_train/num_test always take their defaults here --
    confirm they were not meant to be parametrized.
    """
    train_set = datasets.FashionMNIST("data/", train=True, download=True)
    test_set = datasets.FashionMNIST("data/", train=False, download=True)
    X_train, y_train = train_set.data.numpy().astype(float), train_set.targets.numpy()
    X_test, y_test = test_set.data.numpy().astype(float), test_set.targets.numpy()

    # Keep only the leading subset of each split.
    mask = list(range(num_train))
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = list(range(num_test))
    X_test = X_test[mask]
    y_test = y_test[mask]

    # Center both splits on the *training* mean image.
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_test -= mean_image

    # Flatten 28x28 images into feature vectors.
    X_train = np.reshape(X_train, (num_train, -1))
    X_test = np.reshape(X_test, (num_test, -1))

    return X_train, y_train, X_test, y_test
|
# -*- coding: utf-8 -*-
"""
Authors: Tim Hessels
Module: Collect/MYD11
Description:
This module downloads MYD11 LST data from
http://e4ftl01.cr.usgs.gov/. Use the MYD11.LST function to
download and create daily LST images in GTiff format.
The data is available from 2000-02-18 till present.
Examples:
from pyWAPOR.Collect import MYD11
MYD11.LST(Dir='C:/Temp3/', Startdate='2003-12-01', Enddate='2003-12-30',
latlim=[41, 45], lonlim=[-8, -5])
"""
from .LST import main as LST
__all__ = ['LST']
__version__ = '0.1'
|
""" Unit tests for ``wheezy.http.cacheprofile``.
"""
import unittest
from datetime import datetime, timedelta
from unittest.mock import Mock, patch
from wheezy.core.datetime import parse_http_datetime
from wheezy.http.cacheprofile import ( # isort:skip
CACHEABILITY,
CacheProfile,
RequestVary,
SUPPORTED,
)
class SupportedCacheabilityTestCase(unittest.TestCase):
    """Test the ``SUPPORTED``."""

    def test_supported_cacheability(self):
        """Exactly the five known cacheability options are supported."""
        for option in ("none", "server", "client", "both", "public"):
            assert option in SUPPORTED
        assert len(SUPPORTED) == 5

    def test_mapping_between_profile_and_policy(self):
        """Cache profile cacheability maps onto the HTTP cache policy's
        supported set exactly.
        """
        from wheezy.http.cachepolicy import SUPPORTED as policy_supported

        assert set(CACHEABILITY.values()) == set(policy_supported)

    def test_not_supported(self):
        """An unknown cacheability raises ``AssertionError``."""
        self.assertRaises(AssertionError, lambda: CacheProfile("x"))
class CacheProfileTestCase(unittest.TestCase):
"""Test the ``CacheProfile`` class."""
def test_not_enabled(self):
    """A disabled profile reports enabled=False and yields no policy."""
    disabled = CacheProfile("none", enabled=False)
    assert not disabled.enabled
    assert disabled.cache_policy() is None
def test_location_none(self):
    """The "none" profile disables caching and does not vary requests."""
    p = CacheProfile("none")
    assert not p.request_vary
    headers = []
    p.cache_policy().extend(headers)
    expected = [
        ("Cache-Control", "no-cache"),
        ("Pragma", "no-cache"),
        ("Expires", "-1"),
    ]
    assert headers == expected
def test_location_server(self):
    """Server-side caching varies requests but sends no-cache to clients."""
    p = CacheProfile("server", duration=100)
    assert p.request_vary
    headers = []
    p.cache_policy().extend(headers)
    expected = [
        ("Cache-Control", "no-cache"),
        ("Pragma", "no-cache"),
        ("Expires", "-1"),
    ]
    assert headers == expected
@patch(
    "wheezy.http.cacheprofile.RequestVary.__init__",
    Mock(return_value=None),
)
def test_request_vary(self):
    """The profile forwards all vary_* options to RequestVary."""
    vary = {
        "query": ["q1", "q2"],
        "form": ["f1", "f2"],
        "cookies": ["c1", "c2"],
        "environ": ["e1", "e2"],
    }
    profile = CacheProfile(
        "server",
        duration=100,
        vary_query=vary["query"],
        vary_form=vary["form"],
        vary_cookies=vary["cookies"],
        vary_environ=vary["environ"],
    )
    profile.request_vary.__init__.assert_called_once_with(
        query=vary["query"],
        form=vary["form"],
        cookies=vary["cookies"],
        environ=vary["environ"],
    )
def test_location_client(self):
    """Client-only caching: private policy, no request vary."""
    profile = CacheProfile("client", duration=100)
    assert not profile.request_vary
    policy = profile.cache_policy()
    assert policy.cacheability == "private"
    assert policy.max_age_delta == 100
    modified = parse_http_datetime(policy.http_last_modified)
    assert modified < datetime.utcnow()
    assert parse_http_datetime(policy.http_expires) == modified + timedelta(
        seconds=profile.duration
    )
    headers = []
    policy.extend(headers)
    assert headers == [
        ("Cache-Control", "private, max-age=100"),
        ("Expires", policy.http_expires),
        ("Last-Modified", policy.http_last_modified),
    ]
def test_location_both(self):
    """Client+server caching: private policy plus request/HTTP vary."""
    profile = CacheProfile("both", duration=100, http_vary=["Cookie"])
    assert profile.request_vary
    policy = profile.cache_policy()
    assert policy.cacheability == "private"
    assert policy.max_age_delta == 100
    modified = parse_http_datetime(policy.http_last_modified)
    assert modified < datetime.utcnow()
    assert parse_http_datetime(policy.http_expires) == modified + timedelta(
        seconds=profile.duration
    )
    headers = []
    policy.extend(headers)
    assert headers == [
        ("Cache-Control", "private, max-age=100"),
        ("Expires", policy.http_expires),
        ("Last-Modified", policy.http_last_modified),
        ("Vary", "Cookie"),
    ]
def test_location_public(self):
    """public cache profile."""
    profile = CacheProfile("public", duration=100)
    assert profile.request_vary
    policy = profile.cache_policy()
    # Only difference from "client"/"both": cacheability is "public".
    assert "public" == policy.cacheability
    assert 100 == policy.max_age_delta
    now = parse_http_datetime(policy.http_last_modified)
    assert now < datetime.utcnow()
    expires = now + timedelta(seconds=profile.duration)
    assert expires == parse_http_datetime(policy.http_expires)
    headers = []
    policy.extend(headers)
    assert [
        ("Cache-Control", "public, max-age=100"),
        ("Expires", policy.http_expires),
        ("Last-Modified", policy.http_last_modified),
    ] == headers
def test_no_store(self):
    """no_store."""
    # Non-client locations: no_store is appended to the no-cache policy.
    for location in ["none", "server"]:
        profile = CacheProfile(location, no_store=True, duration=100)
        policy = profile.cache_policy()
        headers = []
        policy.extend(headers)
        assert [
            ("Cache-Control", "no-cache, no-store"),
            ("Pragma", "no-cache"),
            ("Expires", "-1"),
        ] == headers
    # Client-visible locations: no_store combines with max-age, which
    # follows http_max_age rather than duration.
    for location in ["client", "both"]:
        profile = CacheProfile(
            location, no_store=True, duration=100, http_max_age=60
        )
        policy = profile.cache_policy()
        headers = []
        policy.extend(headers)
        assert [
            ("Cache-Control", "private, no-store, max-age=60"),
            ("Expires", policy.http_expires),
            ("Last-Modified", policy.http_last_modified),
        ] == headers
    # max-age=0 is preserved verbatim (not dropped as falsy).
    for location in ["public"]:
        profile = CacheProfile(
            location, no_store=True, duration=100, http_max_age=0
        )
        policy = profile.cache_policy()
        headers = []
        policy.extend(headers)
        assert [
            ("Cache-Control", "public, no-store, max-age=0"),
            ("Expires", policy.http_expires),
            ("Last-Modified", policy.http_last_modified),
        ] == headers
def test_invalid_duration(self):
    """A zero duration is rejected for every cacheable location."""
    for location in ("server", "client", "both", "public"):
        with self.assertRaises(ValueError):
            CacheProfile(location, duration=0)
def test_http_max_age(self):
    """check http max age."""
    for location in ["server", "client", "both", "public"]:
        # http_max_age defaults to duration and can be overridden,
        # including with an explicit 0.
        p = CacheProfile(location, duration=10)
        assert 10 == p.http_max_age
        p = CacheProfile(location, duration=10, http_max_age=20)
        assert 20 == p.http_max_age
        p = CacheProfile(location, duration=10, http_max_age=0)
        assert 0 == p.http_max_age
    for location in ["client", "both", "public"]:
        p = CacheProfile(location, duration=10)
        policy = p.client_policy()
        assert policy
        assert isinstance(policy.modified, datetime)
        assert 10 == policy.max_age_delta
        # With http_max_age=0 the policy expires immediately:
        # Expires equals Last-Modified and max-age delta is 0.
        p = CacheProfile(location, duration=10, http_max_age=0)
        policy = p.cache_policy()
        assert policy
        assert isinstance(policy.modified, datetime)
        assert policy.http_last_modified == policy.http_expires
        assert 0 == policy.max_age_delta
class RequestVaryTestCase(unittest.TestCase):
    """Test the ``RequestVary`` class."""

    def test_init_default_vary(self):
        """Default vary strategy is request_key."""
        request_vary = RequestVary()
        assert request_vary.request_key == request_vary.key

    def test_init_vary_parts(self):
        """Ensure each vary part (query, form, etc) is added to the
        vary part strategy.
        """
        query = ["q1", "q3", "q2"]
        form = ["f1", "f3", "f2"]
        cookies = ["c1", "c3", "c2"]
        environ = ["e1", "e3", "e2"]
        request_vary = RequestVary(
            query=query, form=form, cookies=cookies, environ=environ
        )
        # request_key plus one part per vary source, and each name list
        # is stored sorted as a tuple.
        assert 5 == len(request_vary.vary_parts)
        assert request_vary.request_key == request_vary.vary_parts[0]
        assert request_vary.key_query == request_vary.vary_parts[1]
        assert ("q1", "q2", "q3") == request_vary.query
        assert request_vary.key_form == request_vary.vary_parts[2]
        assert ("f1", "f2", "f3") == request_vary.form
        assert request_vary.key_cookies == request_vary.vary_parts[3]
        assert ("c1", "c2", "c3") == request_vary.cookies
        assert request_vary.key_environ == request_vary.vary_parts[4]
        assert ("e1", "e2", "e3") == request_vary.environ

    def test_key_default_vary(self):
        """Check key for default vary strategy."""
        request_vary = RequestVary()
        mock_request = Mock()
        mock_request.method = "GET"
        mock_request.environ = {"PATH_INFO": "/welcome"}
        # Key is first letter of the method plus the request path.
        assert "G/welcome" == request_vary.key(mock_request)

    def test_key_vary_parts(self):
        """Check key for vary part strategy."""
        query = ["q1", "q3", "q2"]
        form = ["f1", "f3", "f2"]
        cookies = ["c1", "c3", "c2"]
        environ = ["e1", "e3", "e2"]
        request_vary = RequestVary(
            query=query, form=form, cookies=cookies, environ=environ
        )
        mock_request = Mock()
        mock_request.method = "GET"
        mock_request.environ = {"PATH_INFO": "/welcome"}
        mock_request.query = {"q1": ["1"]}
        mock_request.form = {"f2": ["2"]}
        mock_request.cookies = {"c3": "3"}
        key = request_vary.key(mock_request)
        # Present values are encoded inline; missing ones as 'X'
        # placeholders (environ values come from the Mock, hence 'E').
        assert "G/welcomeQN1XXFXN2XCXXN3EXXX" == key
|
from abjad import *
from presentation import *
### PRE ###
def show_demo():
    """Build and show an 8-staff sketch of rotated talea rhythms.

    Each staff renders the same TaleaRhythmMaker against the same
    divisions, but with both the talea rotation index and the division
    sequence rotated one step further per staff.
    """
    talea = rhythmmakertools.Talea(
        counts=[1, 2, 3],
        denominator=16,
    )
    tie_specifier = rhythmmakertools.TieSpecifier(
        tie_across_divisions=True,
    )
    burnish_specifier = rhythmmakertools.BurnishSpecifier(
        left_classes=(Rest, Note),
        left_counts=(1,),
    )
    talea_rhythm_maker = rhythmmakertools.TaleaRhythmMaker(
        talea=talea,
        extra_counts_per_division=[0, 1, 1],
        burnish_specifier=burnish_specifier,
        tie_specifier=tie_specifier,
    )
    divisions = [(3, 8), (5, 4), (1, 4), (13, 16)]
    score = Score()
    for i in range(8):
        # rotation=i shifts the talea; the divisions themselves are
        # rotated below so every staff differs in both respects.
        selections = talea_rhythm_maker(divisions, rotation=i)
        voice = Voice(selections)
        staff = Staff([voice], context_name='RhythmicStaff')
        score.append(staff)
        divisions = sequencetools.rotate_sequence(divisions, 1)
    lilypond_file = make_sketch_lilypond_file(score)
    show(lilypond_file)
#show_demo()
### EXAMPLE ONE ###
# Simplest maker: one note (or tied group) per division.
note_rhythm_maker = rhythmmakertools.NoteRhythmMaker()
divisions = [(3, 8), (5, 4), (1, 4), (13, 16)]
selections = note_rhythm_maker(divisions)
for selection in selections:
    # Bare expression: only meaningful when stepping through this demo
    # interactively (echoes each selection); a no-op when run as a script.
    selection
staff = Staff(selections, context_name='RhythmicStaff')
#show(staff)
from presentation import *  # redundant with the top-of-file import; kept for demo flow
sketch = make_sketch(note_rhythm_maker, divisions)
#show(sketch)
divisions_b = [(5, 16), (3, 8), (3, 8), (5, 8), (1, 4)]
sketch = make_sketch(note_rhythm_maker, divisions_b)
#show(sketch)
# Repeat the division list 20x to stress a longer sketch.
divisions_b *= 20
sketch = make_sketch(note_rhythm_maker, divisions_b)
#show(sketch)
import random
random_numerators = [random.randrange(1, 16 + 1) for x in range(100)]
random_divisions = [(x, 32) for x in random_numerators]
sketch = make_sketch(note_rhythm_maker, random_divisions)
### EXAMPLE TWO ###
talea = rhythmmakertools.Talea(
    counts=[1, 2, 3],
    denominator=16,
    )
#for i in range(20):
#    print(i, talea[i])
### INITIAL TALEA RHYTHM-MAKER
talea_rhythm_maker = rhythmmakertools.TaleaRhythmMaker(talea=talea)
divisions  # remind ourselves of original divisions
sketch = make_sketch(talea_rhythm_maker, divisions)
#show(staff)
### SPECIFIERS
tie_specifier = rhythmmakertools.TieSpecifier(
    tie_across_divisions=True,
    )
burnish_specifier = rhythmmakertools.BurnishSpecifier(
    left_classes=[Rest],
    left_counts=[1, 0],
    )
extra_counts_per_division = [0, 1, 1]
### TEMPLATED
# new() templates a fresh maker from the old one plus changed keywords.
talea_rhythm_maker = new(
    talea_rhythm_maker,
    burnish_specifier=burnish_specifier,
    extra_counts_per_division=extra_counts_per_division,
    tie_specifier=tie_specifier,
    )
divisions  # remind ourselves of original divisions
sketch = make_sketch(talea_rhythm_maker, divisions)
# show(sketch)
### EXAMPLE THREE ###
# Stack 12 staves, rotating talea and divisions one step per staff.
score = Score()
for i in range(12):
    selections = talea_rhythm_maker(divisions, rotation=i)
    voice = Voice(selections)
    staff = Staff([voice], context_name='RhythmicStaff')
    score.append(staff)
    divisions = sequencetools.rotate_sequence(divisions, 1)
sketch = make_sketch_lilypond_file(score)
#show(sketch)
from poynt import API
class BusinessApplication():
    """
    A class providing methods to fetch business application information.

    Every public method issues a GET against the
    ``/businesses/{business_id}/payfac-application`` endpoint family via
    the shared API instance; they differ only in the sub-resource path.
    """

    @classmethod
    def _get_payfac(cls, business_id, suffix=''):
        """
        Issue a GET to the payfac-application endpoint for a business.

        Arguments:
        business_id (str): the business ID of the business
        suffix (str): optional sub-resource path (e.g. '/orders')
        """
        api = API.shared_instance()
        return api.request(
            url='/businesses/' + business_id + '/payfac-application' + suffix,
            method='GET',
            app='WEB',
        )

    @classmethod
    def get_business_application(cls, business_id):
        """
        Gets a business application

        Arguments:
        business_id (str): the business ID of the business
        """
        return cls._get_payfac(business_id)

    @classmethod
    def get_business_account(cls, business_id):
        """
        Get account information related to the business

        Arguments:
        business_id (str): the business ID of the business
        """
        return cls._get_payfac(business_id, '/account')

    @classmethod
    def get_business_orders(cls, business_id):
        """
        Gets the orders attached to business application

        Arguments:
        business_id (str): the business ID of the business
        """
        return cls._get_payfac(business_id, '/orders')

    @classmethod
    def get_business_application_status(cls, business_id):
        """
        Get the status of business application

        Arguments:
        business_id (str): the business ID of the business
        """
        return cls._get_payfac(business_id, '/status')

    @classmethod
    def get_business_application_profile(cls, business_id):
        """
        Get business profile info

        Arguments:
        business_id (str): the business ID of the business
        """
        return cls._get_payfac(business_id, '/profile')
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
MOPA
An independet project
Método de Obtenção da Posição de Atirador
-------------------
begin : 2019-03-06
git sha : $Format:%H$
copyright : (C) 2019 by João P. Esperidião
email : joao.p2709@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from os import path
from PyQt5 import uic
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from PyQt5.QtWidgets import QWidget
from Core.enums import Enums
from Settings.settings import Settings
from Core.Observation.observation import Observation
from Core.Observation.observationsManager import ObservationsManager
from Gui.CustomWidgets.FeatureForms.featureForm import FeatureForm
FORM_CLASS, _ = uic.loadUiType(
path.join(path.dirname(__file__), 'observationWidget.ui')
)
class ObservationWidget(QWidget, FORM_CLASS):
    """
    Widget that lists observations and lets the user inspect, add and
    edit them. Signals are emitted whenever an observation is added or
    edited, or the combo box selection changes.
    """

    # Qt signals carrying the affected Observation / the new combo index.
    observationAdded, observationEdited = pyqtSignal(Observation), pyqtSignal(Observation)
    selectionChanged = pyqtSignal(int)

    def __init__(self, parent=None, settings=None):
        """
        Class constructor.
        :param parent: (QWidget) any widget from Qt5 parent to this dialog.
        :param settings: (Settings) MOPA's settings database manager.
        """
        super(ObservationWidget, self).__init__(parent)
        self.setupUi(self)
        self.parent = parent
        self.settings = settings if settings is not None else Settings()
        self._obsManager = ObservationsManager(self.settings)
        # Start with an empty listing; callers refresh with real data later.
        self.refresh(obsList=[])

    def clear(self):
        """
        Clears observation combo box and GUI information, if any.
        """
        self.obsComboBox.clear()

    def fillObsComboBox(self, obsList):
        """
        Fills all given observations to the selection combo box.
        :param obsList: (list-of-Observation) observations to be listed on widget.
        """
        self.clear()
        # First item is a placeholder, hence the index-0 guards elsewhere.
        self.obsComboBox.addItem(self.tr("Select an observation..."))
        self.obsComboBox.addItems([
            "Observation {0}".format(o['id']) for o in obsList
        ])

    def setCurrentObservation(self, obs):
        """
        Sets current observation from its text as current selection on combo box.
        :param obs: (str) observation exhibition text.
        :return: (bool) whether selection was set.
        """
        self.obsComboBox.setCurrentText(obs)
        return self.obsComboBox.currentText() == obs

    def currentText(self):
        """
        Gets current text exhibited on combo box.
        :return: (str) current text.
        """
        return self.obsComboBox.currentText()

    def refresh(self, obsList):
        """
        Resets widget to initial state.
        :param obsList: (list-of-Observation) observations to be listed on widget.
        """
        self.clear()
        self.fillObsComboBox(obsList)
        self.setObsInformation(self.currentObservation())

    def obsIdFromIndex(self, idx):
        """
        Gets the observation ID from item at the given index. If index is invalid None
        is returned.
        :param idx: (int) item index in the observation selection combo box.
        :return: (int) observation ID.
        """
        if idx > 0 and self.obsComboBox.count() > idx:
            # Item text is "Observation <id>"; the ID is the last token.
            return int(self.obsComboBox.itemText(idx).split(" ")[-1])
        return None

    def observationFromIndex(self, idx):
        """
        Gets the observation from the indicated index. If index is invalid None is returned.
        :param idx: (int) item index in the observation selection combo box.
        :return: (Observation) observation instance from the indicated index.
        """
        oid = self.obsIdFromIndex(idx)
        if oid is not None:
            return self._obsManager.observationFromId(oid)
        return None

    def obsId(self):
        """
        Gets current observation's ID. Returns None if no selection was made.
        :return: (int) observation's ID.
        """
        if self.obsComboBox.currentIndex() < 1:
            # Index 0 is the "Select an observation..." placeholder.
            return None
        return int(self.obsComboBox.currentText().split(" ")[-1])

    def currentObservation(self):
        """
        Gets the observation object for current selection.
        :return: (Observation) an instance of Observation object for current selection.
        """
        if self.obsId() is None:
            return None  # should it be a new instance of observation obj?
        return self._obsManager.observationFromId(self.obsId())

    def setObsInformation(self, obs):
        """
        Sets observation information to widget interface.
        :param obs: (Observation) observation to have its info exposed.
        """
        # Fall back to a blank/new observation so labels are always populated.
        obs = obs or self._obsManager.newObservation()
        self.azLabel.setText(self.tr("Azimuth: {0:.2f}").format(obs['azimuth']))
        self.zenLabel.setText(self.tr("Vertical angle: {0:.2f}").format(obs['zenith']))
        self.dateLabel.setText(self.tr("Observation date: {0}").format(obs['date']))
        title = self.tr("Event information")
        if obs.isValid():
            title += self.tr(" from station ID = {0}").format(obs['sensorId'])
        self.groupBox.setTitle(title)

    def isEditable(self):
        """
        Verifies whether current selection may be edited.
        :return: (bool) edition status.
        """
        return not self.obsComboBox.currentIndex() < 1

    @pyqtSlot(int, name="on_obsComboBox_currentIndexChanged")
    def checkEditButtonStatus(self, idx):
        """
        Updates the edit push button enable status.
        :param idx: (int) current index.
        """
        self.updateObservationPushButton.setEnabled(self.isEditable())
        self.setObsInformation(self.currentObservation())
        self.selectionChanged.emit(idx)

    def parametersFromForm(self, attributes):
        """
        Sets the correct variable types from form info.
        :param attributes: (dict) form's info.
        :return: (dict) values reassigned to its correct variable type.
        """
        # NOTE(review): the bare excepts below silently keep the raw string
        # when conversion fails; consider narrowing to (ValueError, TypeError,
        # KeyError) so unexpected errors are not hidden.
        try:
            attributes['id'] = int(attributes['id'])
        except:
            pass
        try:
            attributes['azimuth'] = float(attributes['azimuth'])
        except:
            pass
        try:
            attributes['zenith'] = float(attributes['zenith'])
        except:
            pass
        try:
            attributes['sensorId'] = int(attributes['sensorId'])
        except:
            pass
        if not isinstance(attributes['date'], str):
            try:
                attributes['date'] = str(attributes['date'])
            except:
                pass
        return attributes

    def checkFormValidity(self, form, checkIfExists=False):
        """
        Checks form validity.
        :param form: (FeatureForm) form to have its contents checked.
        :param checkIfExists: (bool) indicates whether entry existance should be checked.
        :return: (bool) form validity status.
        """
        attr = self.parametersFromForm(form.read())
        ir = self._obsManager.newObservation().invalidationReason(attr)
        if checkIfExists and self._obsManager.idExists(attr['id']):
            ir = self.tr("Observation ID {0} already exists into the database.").\
                format(attr['id'])
        form.setInvalidationMessage(ir)
        # Empty invalidation reason means the form is valid.
        return ir == ''

    @pyqtSlot(bool, name='on_updateObservationPushButton_clicked')
    def openEditForm(self):
        """
        Opens feature form for current selection in edition mode.
        """
        form = FeatureForm(self.currentObservation(), True, self.parent)
        form.setWindowTitle(form.tr("Edit observation's attributes"))
        form.fieldReadOnly('id', True)  # since it is an EDITION, id should be kept the same.
        form.okButtonClicked.connect(self.checkFormValidity)
        if form.exec_() == Enums.Finished:
            attr = self.parametersFromForm(form.read())
            obs = self._obsManager.observationFromAttributes(attr)
            if obs.isValid():
                self._obsManager.updateObservation(obs)
                self.obsComboBox.setItemText(
                    self.obsComboBox.currentIndex(),
                    self.tr("Observation {0}").format(obs['id'])
                )
                # and update its attributes to GUI
                self.setObsInformation(obs)
            form.blockSignals(True)
            del form
            # NOTE(review): emitted even when obs.isValid() is False —
            # presumably OK because checkFormValidity gates the OK button;
            # confirm.
            self.observationEdited.emit(obs)

    @pyqtSlot(bool, name='on_addObservationPushButton_clicked')
    def openForm(self):
        """
        Opens attribute form to be filled in order to add a new sensor.
        """
        form = FeatureForm(self._obsManager.newObservation(), True, self.parent)
        form.setWindowTitle(self.tr("Add a new observation"))
        form.okButtonClicked.connect(lambda f : self.checkFormValidity(f, True))
        if form.exec_() == Enums.Finished:
            attr = self.parametersFromForm(form.read())
            obs = self._obsManager.observationFromAttributes(attr)
            if obs.isValid():
                self._obsManager.addObservation(
                    azimuth=obs['azimuth'], zenith=obs['zenith'],\
                    sensorId=obs['sensorId']
                )
            form.blockSignals(True)
            del form
            # NOTE(review): the combo label uses obs['id'] from the form,
            # while addObservation is not passed an id — verify the manager
            # assigns the same ID.
            name = self.tr("Observation {0}").format(obs['id'])
            self.obsComboBox.addItem(name)
            self.observationAdded.emit(obs)
|
# -*- coding: UTF-8 -*-
"""
An unofficial implementation of CSP-DarkNet with pytorch
@Cai Yichao 2020_09_30
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
# from torchsummary import summary
from .CSPdarknet53conv_bn import Mish, BN_Conv_Mish
from .build import BACKBONE_REGISTRY
from .backbone import Backbone
from detectron2.modeling import ShapeSpec
class ResidualBlock(nn.Module):
    """
    Basic residual block for CSP-Darknet: 1x1 conv -> 3x3 conv -> BN,
    added back onto the input and passed through Mish.
    """

    def __init__(self, chnls, inner_chnnls=None):
        super(ResidualBlock, self).__init__()
        inner = chnls if inner_chnnls is None else inner_chnnls
        self.conv1 = BN_Conv_Mish(chnls, inner, 1, 1, 0)  # always use same padding
        self.conv2 = nn.Conv2d(inner, chnls, 3, 1, 1, bias=False)
        self.bn = nn.BatchNorm2d(chnls)

    def forward(self, x):
        branch = self.bn(self.conv2(self.conv1(x)))
        return Mish()(branch + x)
class CSPFirst(nn.Module):
    """
    First CSP stage: strided downsample, then a two-branch cross-stage
    split (identity transition vs. residual block) fused by a 1x1 conv.
    """

    def __init__(self, in_chnnls, out_chnls):
        super(CSPFirst, self).__init__()
        self.dsample = BN_Conv_Mish(in_chnnls, out_chnls, 3, 2, 1)  # same padding
        self.trans_0 = BN_Conv_Mish(out_chnls, out_chnls, 1, 1, 0)
        self.trans_1 = BN_Conv_Mish(out_chnls, out_chnls, 1, 1, 0)
        self.block = ResidualBlock(out_chnls, out_chnls // 2)
        self.trans_cat = BN_Conv_Mish(2 * out_chnls, out_chnls, 1, 1, 0)

    def forward(self, x):
        downsampled = self.dsample(x)
        shortcut = self.trans_0(downsampled)
        residual = self.block(self.trans_1(downsampled))
        fused = torch.cat((shortcut, residual), 1)
        return self.trans_cat(fused)
class CSPStem(nn.Module):
    """
    CSP stage with downsampling: strided conv, then a half-width
    shortcut branch and a half-width residual-stack branch, concatenated
    and fused by a 1x1 conv.
    """

    def __init__(self, in_chnls, out_chnls, num_block):
        super(CSPStem, self).__init__()
        half = out_chnls // 2
        self.dsample = BN_Conv_Mish(in_chnls, out_chnls, 3, 2, 1)
        self.trans_0 = BN_Conv_Mish(out_chnls, half, 1, 1, 0)
        self.trans_1 = BN_Conv_Mish(out_chnls, half, 1, 1, 0)
        self.blocks = nn.Sequential(*[ResidualBlock(half) for _ in range(num_block)])
        self.trans_cat = BN_Conv_Mish(out_chnls, out_chnls, 1, 1, 0)

    def forward(self, x):
        downsampled = self.dsample(x)
        shortcut = self.trans_0(downsampled)
        residual = self.blocks(self.trans_1(downsampled))
        fused = torch.cat((shortcut, residual), 1)
        return self.trans_cat(fused)
class CSP_DarkNet(Backbone):
    """
    CSP-DarkNet backbone.

    forward() returns a dict of the four CSP stage outputs keyed
    'res2'..'res5', for use as a detectron2 backbone.
    """

    def __init__(self, num_blocks, num_classes=1000):
        """
        :param num_blocks: residual-block count per CSP stage, e.g.
            [2, 8, 8, 4] for CSP-DarkNet-53.
        :param num_classes: size of the classifier head; unused by
            forward() but kept so classification checkpoints still load.
        """
        super(CSP_DarkNet, self).__init__()
        chnls = [64, 128, 256, 512, 1024]
        self.conv0 = BN_Conv_Mish(3, 32, 3, 1, 1)  # same padding
        self.neck = CSPFirst(32, chnls[0])
        self.body = nn.Sequential(
            *[CSPStem(chnls[i], chnls[i + 1], num_blocks[i]) for i in range(4)])
        # Classification head (unused in detection forward pass).
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(chnls[4], num_classes)

    def forward(self, x):
        """Run stem + four CSP stages, collecting every stage output."""
        outputs = {}
        out = self.neck(self.conv0(x))
        # Iterate the stages directly instead of indexing
        # self.body._modules by stringified counters; all four stage
        # outputs are exported, matching the original index bookkeeping.
        for name, stage in zip(('res2', 'res3', 'res4', 'res5'), self.body):
            out = stage(out)
            outputs[name] = out
        return outputs

    def output_shape(self):
        """Channel/stride metadata for each exported feature map."""
        return {'res2': ShapeSpec(channels=128, stride=4),
                'res3': ShapeSpec(channels=256, stride=8),
                'res4': ShapeSpec(channels=512, stride=16),
                'res5': ShapeSpec(channels=1024, stride=32)}
def csp_darknet_53(num_classes=1000):
    """Build the standard 53-layer variant ([2, 8, 8, 4] blocks per stage)."""
    return CSP_DarkNet([2, 8, 8, 4], num_classes)
@BACKBONE_REGISTRY.register()
def build_CSPdarknet53_backbone(cfg, input_shape):
    """detectron2 backbone factory.

    NOTE(review): cfg and input_shape are accepted for registry
    compatibility but ignored; the backbone is always built with
    default settings.
    """
    return csp_darknet_53()
if __name__ == '__main__':
    # Smoke test: print a layer/parameter summary for a 224x224 input.
    net=csp_darknet_53()
    from torchsummary import summary
    summary(net, (3, 224, 224))
    pass
# -*- coding: utf-8 -*-
"""
@author: Ardalan MEHRANI <ardalan77400@gmail.com>
@brief:
"""
import numpy as np
from sklearn import datasets, metrics, model_selection
from pylightgbm.models import GBMClassifier
# Parameters
seed = 1337
# NOTE(review): '~' is not expanded by Python automatically — presumably
# pylightgbm handles it; otherwise use os.path.expanduser.
path_to_exec = "~/Documents/apps/LightGBM/lightgbm"
np.random.seed(seed)  # for reproducibility

# Synthetic binary classification problem, 80/20 train/test split.
X, Y = datasets.make_classification(n_samples=1000, n_features=100, n_classes=2, random_state=seed)
x_train, x_test, y_train, y_test = model_selection.train_test_split(X, Y, test_size=0.2, random_state=seed)

# Shared LightGBM parameters; early stopping watches the test fold.
params = {'exec_path': path_to_exec,
          'num_iterations': 1000, 'learning_rate': 0.01,
          'min_data_in_leaf': 1, 'num_leaves': 5,
          'metric': 'binary_error', 'verbose': False,
          'early_stopping_round': 20}

# Compare the two boosting strategies on identical data.
clfs = [
    ['gbdt', GBMClassifier(boosting_type='gbdt', **params)],
    ['dart', GBMClassifier(boosting_type='dart', drop_rate=0.02, drop_seed=4, **params)],
]

for boosting_type, clf in clfs:
    clf.fit(x_train, y_train, test_data=[(x_test, y_test)])
    y_prob = clf.predict_proba(x_test)
    y_pred = y_prob.argmax(-1)
    print("booster {} loss: {}, accuracy: {}, best round: {}".format(
        boosting_type,
        metrics.log_loss(y_test, y_prob),
        metrics.accuracy_score(y_test, y_pred),
        clf.best_round
    ))
|
import pandas as pd
import re
def get_sisa_kamar(x):
    """Extract the 'rooms left' segment from a raw scraped text blob.

    Falls back to returning the input unchanged when the expected
    separators are absent.
    """
    try:
        middle = x.split('\n \n\t')[1]
        return middle.split('\n \n\t\t\t\t')[0]
    except IndexError:
        return x
def get_area(x):
    """Extract the area name from a raw scraped text blob.

    Falls back to returning the input unchanged when the expected
    separators are absent.

    Bug fix: the first split must be bounded (maxsplit=1). An unbounded
    split('\n \n\t') also consumes every '\n \n\t\t\t\t' marker (whose
    prefix is the shorter separator), so the second split could never
    find its separator and the function always hit the IndexError
    fallback.
    """
    try:
        rest = x.split('\n \n\t', 1)[1]
        return rest.split('\n \n\t\t\t\t')[1].split('\n\t\t\t')[1].strip()
    except IndexError:
        return x
def get_fasilitas(x):
    """Extract the facilities list ('·'-separated) from a raw scraped blob.

    Falls back to returning the input unchanged when the expected
    separators are absent.

    Bug fix: the first split must be bounded (maxsplit=1). An unbounded
    split('\n \n\t') also consumes every '\n \n\t\t\t\t' marker (whose
    prefix is the shorter separator), so the second split could never
    find its separator and the function always hit the IndexError
    fallback.
    """
    try:
        rest = x.split('\n \n\t', 1)[1]
        return rest.split('\n \n\t\t\t\t')[1].split('\n\t\t\t')[2]\
            .replace('\t', '').split('·')
    except IndexError:
        return x
def get_harga(x):
    """Extract the price (digits-and-dots string, 'Rp ' stripped) from a blob.

    Falls back to returning the input unchanged when the expected
    separators are absent.

    Bug fix: the first split must be bounded (maxsplit=1). An unbounded
    split('\n \n\t') also consumes every '\n \n\t\t\t\t' marker (whose
    prefix is the shorter separator), so the second split could never
    find its separator and the function always hit the IndexError
    fallback.
    """
    try:
        rest = x.split('\n \n\t', 1)[1]
        return rest.split('\n \n\t\t\t\t')[1].split('\n\t\t\t')[4]\
            .replace('\t\t\t\tRp ', '')
    except IndexError:
        return x
def main():
    """Read the merged scrape CSV, derive structured columns from the raw
    'texts' blob, and write the result to transform_1.csv.

    NaN cells in 'texts' arrive as float, hence the isinstance(x, float)
    guards that substitute "no-*" placeholder values.
    """
    df = pd.read_csv('merged_kota_drop_dupl.csv')
    # isinstance(message, bytes)
    df['type_kos'] = df['texts'].apply(lambda x: x.split('\n \n\t')[0] \
        if isinstance(x, float) is False else "no-type")
    df['sisa_kamar'] = df['texts'].apply(lambda x: get_sisa_kamar(x) \
        if isinstance(x, float) is False else "no-sisa-kamar")
    df['area'] = df['texts'].apply(lambda x: get_area(x) \
        if isinstance(x, float) is False else "no-area")
    df['fasilitas'] = df['texts'].apply(lambda x: get_fasilitas(x) \
        if isinstance(x, float) is False else "no-fasilitas")
    df['harga'] = df['texts'].apply(lambda x: get_harga(x) \
        if isinstance(x, float) is False else "no-harga")
    # Only convert prices that are purely digits after removing the
    # thousands separators; anything else is kept as-is.
    df['harga_nomina'] = df['harga'].apply(lambda x: int(x.replace('.','')) \
        if x.replace('.','').isdigit() is True else x)
    df.to_csv("transform_1.csv", index=False)

if __name__ == '__main__':
    main()
|
import ipaddr
import re
from django.core.exceptions import ValidationError
from cyder.cydns.validation import validate_domain_name
from cyder.base.eav.utils import strip_and_get_base, validate_list
### Naming conventions in this module:
###
### - A function that does not start with '_' is a validator that can be
### used in Key.value_type
###
### - A function that starts with '_' is a utility function not useful
### outside this module
###
### - A function like '_foo' raises an exception if the value is not
### valid
###
### - A function like '_is_foo' returns a bool indicating whether the
### value is valid
### Parameter assumptions:
### * 'value' in all functions:
### - basestring object
### - no leading or trailing spaces
### * 'value' in validators:
### - not '' or u''
# (key, human-readable description) choices for Key.value_type; each key
# names a validator function defined below.
# NOTE(review): the 'flag' description says 'yes'/'no'/'true'/'false',
# but the flag() validator below accepts 'on'/'off'/'true'/'false' —
# confirm which set is intended.
VALUE_TYPES = (
    ('flag', "flag: 'yes', 'no', 'true', or 'false' (case-insensitive)"),
    ('text', "text: 'text' -> '\"text\"'"),
    ('string', "string: 'text' -> '\"text\"' or "
               "'1a:2b:3c:4d' -> '1a:2b:3c:4d'"),
    ('identifier', "identifier: 'identifier'"),
    ('int8', "int8: 8-bit signed integer"),
    ('uint8', "uint8: 8-bit unsigned integer"),
    ('int16', "int16: 16-bit signed integer"),
    ('uint16', "uint16: 16-bit unsigned integer"),
    ('int32', "int32: 32-bit signed integer"),
    ('uint32', "uint32: 32-bit unsigned integer"),
    ('host', "host: IPv4 address ('1.2.3.4') or hostname ('example.com')"),
    ('domain', "domain: 'example.com'"),
    ('leasetime', "lease time: uint32 or 'infinite'"),
    ('flag_optional_text', "'flag [text]'"),
    ('uint8_list', "'uint8, uint8, uint8 ...'"),
    ('uint16_list', "'uint16, uint16, uint16 ...'"),
    ('host_list', "host list: 'host, host, host ...'"),
    ('host_pair', "'host host'"),
    ('host_pair_list', "'host host, host host, host host ...'"),
    ('flag_host_list', "'flag host, host, host ...'"),
    ('domain_list', "domain list: '\"domain\", \"domain\", \"domain\" ...'"),
    ('ddnsstyle', "DDNS style"),
    ('syslogfacility', "syslog facility"),
    ('ldapmethod', "LDAP method"),
    ('ldapsslusage', "ldap-ssl-usage value"),
    ('ldaptlsreqcert', "ldap-tls-reqcert value"),
    ('ldaptlscrlcheck', "ldap-tls-crlcheck value"),
)
###########################################
### Utility functions that return bools ###
###########################################
# These are not validators. They return bools.
def _is_ip(value):
    """Return whether value parses as an IP address (v4 or v6)."""
    try:
        ipaddr.IPAddress(value)
        return True
    except ValueError:
        return False

def _is_ip4(value):
    """Return whether value parses as an IPv4 address."""
    try:
        ipaddr.IPv4Address(value)
        return True
    except ipaddr.AddressValueError:
        return False

def _is_ip6(value):
    """Return whether value parses as an IPv6 address."""
    try:
        ipaddr.IPv6Address(value)
        return True
    except ipaddr.AddressValueError:
        return False
def _is_domain(value):
    """Return whether value is a valid domain name.

    A single trailing dot (fully-qualified form) is tolerated.
    """
    if not value:
        return False
    if value[-1] == '.':
        value = value[:-1]
    try:
        validate_domain_name(value)
        return True
    except ValidationError:
        # validate_domain_name doesn't always include the invalid domain in its
        # exceptions. We need to do that for it.
        # NOTE(review): the comment above describes behavior that is not
        # implemented here — the exception is simply swallowed.
        return False
def _is_uint(value, bits):
    """Return whether value is an unsigned integer that fits in `bits` bits."""
    try:
        value, base = strip_and_get_base(value)
        if base == 10 and not value.isdigit():  # "performance" hack
            return False
        return 0 <= int(value, base) <= (2**bits - 1)
    except ValueError:
        return False

def _is_int(value, bits):
    """Return whether value is a signed integer that fits in `bits` bits
    (two's-complement range)."""
    try:
        value, base = strip_and_get_base(value)
        return -(2**(bits-1)) <= int(value, base) <= (2**(bits-1) - 1)
    except ValueError:
        return False
###############################################
### Utility functions that raise exceptions ###
###############################################
# These are not validators, but they do raise exceptions.
def _uint(value, bits):
    """Raise ValidationError unless value is a `bits`-bit unsigned integer."""
    if not _is_uint(value, bits):
        raise ValidationError("Invalid {0}-bit unsigned integer '{1}'"
                              .format(bits, value))

def _int(value, bits):
    """Raise ValidationError unless value is a `bits`-bit signed integer."""
    if not _is_int(value, bits):
        raise ValidationError("Invalid {0}-bit signed integer '{1}'"
                              .format(bits, value))
def _unquote(value):
if not value[0] == value[-1] == '"':
raise ValidationError("'{0}' must be quoted".format(value))
return value[1:-1]
def _dhcpclass(value):
if '"' in value:
raise ValidationError("Invalid DHCP class '{0}'".format(value))
##################
### Validators ###
##################
# These return None or raise an exception
def flag(value):
    """Raise ValidationError unless value is a dhcpd boolean flag."""
    # ISC dhcpd ignores capitalization in flags. Why? No idea.
    # NOTE(review): VALUE_TYPES advertises 'yes'/'no' here but this
    # accepts 'on'/'off' — confirm which set is intended.
    if not value.lower() in ('on', 'off', 'true', 'false'):
        raise ValidationError("Invalid flag '{0}'".format(value))
def text(value):
    """Raise ValidationError if value contains a double quote."""
    if '"' in value:
        raise ValidationError("Invalid text '{0}'".format(value))

def string(value):
    """Raise ValidationError if value contains a double quote.

    Same check as text(); kept separate because the two dhcpd value
    types are rendered differently (see VALUE_TYPES).
    """
    if '"' in value:
        raise ValidationError("Invalid string '{0}'".format(value))
# Letters, digits, '-' and (after the first char) '_'.
_identifier_regex = re.compile(r'^[a-zA-Z0-9-][a-zA-Z0-9_-]*$')

def identifier(value):
    """Raise ValidationError unless value is a valid identifier
    (matches the charset regex and is not purely numeric)."""
    if not (_identifier_regex.match(value) and
            re.search(r'[a-zA-Z_-]', value)):  # at least one non-numeric char
        raise ValidationError("Invalid identifier '{0}'".format(value))
# Fixed-width integer validators; all delegate to _uint/_int.
def uint8(value):
    """Validate an 8-bit unsigned integer string."""
    _uint(value, 8)

def int8(value):
    """Validate an 8-bit signed integer string."""
    _int(value, 8)

def uint16(value):
    """Validate a 16-bit unsigned integer string."""
    _uint(value, 16)

def int16(value):
    """Validate a 16-bit signed integer string."""
    _int(value, 16)

def uint32(value):
    """Validate a 32-bit unsigned integer string."""
    _uint(value, 32)

def int32(value):
    """Validate a 32-bit signed integer string."""
    _int(value, 32)
def host(value):
    """
    Where ISC dhcpd expects a host (what it calls an 'ip-address'), it
    validates the value as a hostname. The semantics differs depending on
    whether the value resembles an IPv4 address, but the validation does not.
    Therefore, a malformed IPv4 address that is still a valid hostname passes
    validation and is interpreted as a hostname. It's up to the user to make
    sure that what they think is an IPv4 address actually is one.

    NOTE: validate_domain_name is outside of this module and, therefore, its
    jurisdiction. At the moment, validate_domain_name considers domain names
    like '1.2.3.4' to be valid. However, if that behavior changes in the
    future, host()'s behavior must not. That's why we fall back to _is_ip4.

    :raises ValidationError: if value is neither a domain nor an IPv4 address.
    """
    if not (_is_domain(value) or _is_ip4(value)):
        raise ValidationError("Invalid host '{0}'".format(value))
def domain(value):
    """Raise ValidationError unless value is a valid domain name."""
    if not _is_domain(value):
        raise ValidationError("Invalid domain '{0}'".format(value))

def leasetime(value):
    """Raise ValidationError unless value is a uint32 or 'infinite'."""
    if not (_is_uint(value, 32) or value == 'infinite'):
        raise ValidationError("Invalid lease time '{0}'".format(value))

def flag_optional_text(value):
    """Validate 'flag' or 'flag "text"' (quoted text after the flag)."""
    splat = value.split(None, 1)
    if len(splat) == 1:
        flag(value)
    else:
        flag(splat[0])
        text(_unquote(splat[1]))
# Comma-separated list validators; validate_list applies the element
# validator to each item.
def uint8_list(value):
    """Validate a comma-separated list of 8-bit unsigned integers."""
    validate_list(value, uint8)

def uint16_list(value):
    """Validate a comma-separated list of 16-bit unsigned integers."""
    validate_list(value, uint16)

def host_list(value):
    """Validate a comma-separated list of hosts."""
    validate_list(value, host)
def host_pair(value):
    """Validate exactly two whitespace-separated hosts."""
    parts = value.split()
    if len(parts) != 2:
        qualifier = 'few' if len(parts) < 2 else 'many'
        raise ValidationError("Invalid host pair '{0}'; too {1} hosts"
                              .format(value, qualifier))
    for part in parts:
        host(part)
def host_pair_list(value):
    """Validate a comma-separated list of host pairs."""
    validate_list(value, host_pair)
def flag_host_list(value):
    """Validate 'flag host, host, host ...'.

    :raises ValidationError: if the host list is missing or any part is
        invalid.
    """
    splat = value.split(None, 1)  # split on first whitespace
    if len(splat) == 1:
        # Bug fix: the message called .format(value) but contained no
        # placeholder, so the offending value was silently dropped.
        raise ValidationError(
            "Invalid flag and host list '{0}'; missing host list"
            .format(value))
    [first, rest] = splat
    flag(first)
    host_list(rest)
def domain_list(value):
    """Validate a comma-separated list of double-quoted domains."""
    validate_list(value, lambda x: domain(_unquote(x)))
def ddnsstyle(value):
    """Raise ValidationError unless value is a valid ddns-update-style."""
    valid_styles = ('none', 'ad-hoc', 'interim')
    if value not in valid_styles:
        raise ValidationError("Invalid DDNS style")
def syslogfacility(value):
    """Raise ValidationError unless value is a standard syslog facility."""
    if value not in ('kern', 'user', 'mail', 'daemon', 'auth', 'syslog',
                     'lpr', 'news', 'uucp', 'cron', 'authpriv', 'ftp',
                     'local0', 'local1', 'local2', 'local3', 'local4',
                     'local5', 'local6', 'local7'):
        raise ValidationError("Invalid syslog facility")
# dhcpd LDAP-related enumerated options.
def ldapmethod(value):
    """Raise ValidationError unless value is a valid ldap-method."""
    if value not in ('static', 'dynamic'):
        raise ValidationError("Invalid LDAP method")

def ldapsslusage(value):
    """Raise ValidationError unless value is a valid ldap-ssl-usage."""
    if value not in ('off', 'on', 'ldaps', 'start_tls'):
        raise ValidationError("Invalid ldap-ssl-usage value")  # FIXME?

def ldaptlsreqcert(value):
    """Raise ValidationError unless value is a valid ldap-tls-reqcert."""
    if value not in ('never', 'hard', 'demand', 'allow', 'try'):
        raise ValidationError("Invalid ldap-tls-reqcert value")  # FIXME?

def ldaptlscrlcheck(value):
    """Raise ValidationError unless value is a valid ldap-tls-crlcheck."""
    if value not in ('none', 'peer', 'all'):
        raise ValidationError("Invalid ldap-tls-crlcheck value")  # FIXME?
|
"""Support for Velbus switches."""
import logging
from velbus.util import VelbusException
from homeassistant.components.switch import SwitchEntity
from . import VelbusEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up Velbus switch based on config_entry."""
    cntrl = hass.data[DOMAIN][entry.entry_id]["cntrl"]
    modules_data = hass.data[DOMAIN][entry.entry_id]["switch"]
    entities = []
    # One entity per (module address, channel) pair registered for the
    # switch platform.
    for address, channel in modules_data:
        module = cntrl.get_module(address)
        entities.append(VelbusSwitch(module, channel))
    async_add_entities(entities)
class VelbusSwitch(VelbusEntity, SwitchEntity):
    """Representation of a switch."""

    def _send(self, command):
        """Run a module relay command, logging any Velbus failure."""
        try:
            command(self._channel)
        except VelbusException as err:
            _LOGGER.error("A Velbus error occurred: %s", err)

    @property
    def is_on(self):
        """Return true if the switch is on."""
        return self._module.is_on(self._channel)

    def turn_on(self, **kwargs):
        """Instruct the switch to turn on."""
        self._send(self._module.turn_on)

    def turn_off(self, **kwargs):
        """Instruct the switch to turn off."""
        self._send(self._module.turn_off)
|
from .keygen import KeyInfo, free_keys_left, generate_key, get_key_information, set_key_used, validate_key |
import os
import argparse
import gzip
import json
import tqdm
# Fallback values applied when the corresponding CLI option is omitted.
DEFAULT_ARGS = {"input_path": "data/datasets/pointnav/citi/v2/train/train.json.gz"}


def parse_args():
    """Parse command-line options, falling back to DEFAULT_ARGS for unset ones."""
    parser = argparse.ArgumentParser()
    for long_flag, short_flag in (("--input-path", "-i"), ("--output-path", "-o")):
        parser.add_argument(long_flag, short_flag)
    # Collect any trailing extra configuration verbatim.
    parser.add_argument("extra_cfg", nargs=argparse.REMAINDER)
    parser.set_defaults(**DEFAULT_ARGS)
    return parser.parse_args()
def reverse_raw_episode(episode):
    """Swap an episode's start position with its first goal position.

    Mutates *episode* in place and returns it; the episode id gains a
    ``reversed_`` prefix so the new episode stays distinguishable.
    """
    episode["episode_id"] = "reversed_" + episode["episode_id"]
    start, goal = episode["start_position"], episode["goals"][0]["position"]
    episode["start_position"], episode["goals"][0]["position"] = goal, start
    return episode
def main(args):
    """Reverse every episode of a PointNav dataset and write a new gzip file.

    When --output-path is omitted, it is derived from the input path by
    appending ``_reversed`` to the directory two levels above the file.
    """
    if args.output_path is None:
        head, *tail = args.input_path.rsplit('/', 2)
        args.output_path = '/'.join((head + "_reversed", *tail))

    print(f"Opening {args.input_path}")
    with gzip.open(args.input_path) as in_file:
        dataset = json.load(in_file)

    reversed_episodes = [
        reverse_raw_episode(episode)
        for episode in tqdm.tqdm(dataset["episodes"], desc="Reversing episodes")
    ]

    print(f"Saving {args.output_path}")
    os.makedirs(os.path.dirname(args.output_path), exist_ok=True)
    with gzip.open(args.output_path, 'wt') as out_file:
        json.dump({"episodes": reversed_episodes}, out_file)
    print("Done")


if __name__ == "__main__":
    main(parse_args())
|
"""
Introduction of Neighbor Sampling for GNN Training
==================================================
In :doc:`previous tutorials <../blitz/1_introduction>` you have learned how to
train GNNs by computing the representations of all nodes on a graph.
However, sometimes your graph is too large to fit the computation of all
nodes in a single GPU.
By the end of this tutorial, you will be able to
- Understand the pipeline of stochastic GNN training.
- Understand what is neighbor sampling and why it yields a bipartite
graph for each GNN layer.
"""
######################################################################
# Message Passing Review
# ----------------------
#
# Recall that in `Gilmer et al. <https://arxiv.org/abs/1704.01212>`__
# (also in :doc:`message passing tutorial <../blitz/3_message_passing>`), the
# message passing formulation is as follows:
#
# .. math::
#
#
# m_{u\to v}^{(l)} = M^{(l)}\left(h_v^{(l-1)}, h_u^{(l-1)}, e_{u\to v}^{(l-1)}\right)
#
# .. math::
#
#
# m_{v}^{(l)} = \sum_{u\in\mathcal{N}(v)}m_{u\to v}^{(l)}
#
# .. math::
#
#
# h_v^{(l)} = U^{(l)}\left(h_v^{(l-1)}, m_v^{(l)}\right)
#
# where DGL calls :math:`M^{(l)}` the *message function*, :math:`\sum` the
# *reduce function* and :math:`U^{(l)}` the *update function*. Note that
# :math:`\sum` here can represent any function and is not necessarily a
# summation.
#
# Essentially, the :math:`l`-th layer representation of a single node
# depends on the :math:`(l-1)`-th layer representation of the same node,
# as well as the :math:`(l-1)`-th layer representation of the neighboring
# nodes. Those :math:`(l-1)`-th layer representations then depend on the
# :math:`(l-2)`-th layer representation of those nodes, as well as their
# neighbors.
#
# The following animation shows how a 2-layer GNN is supposed to compute
# the output of node 5:
#
# |image1|
#
# You can see that to compute node 5 from the second layer, you will need
# its direct neighbors’ first layer representations (colored in yellow),
# which in turn needs their direct neighbors’ (i.e. node 5’s second-hop
# neighbors’) representations (colored in green).
#
# .. |image1| image:: https://data.dgl.ai/tutorial/img/sampling.gif
#
######################################################################
# Neighbor Sampling Overview
# --------------------------
#
# You can also see from the previous example that computing representation
# for a small number of nodes often requires input features of a
# significantly larger number of nodes. Taking all neighbors for message
# aggregation is often too costly since the nodes needed for input
# features would easily cover a large portion of the graph, especially for
# real-world graphs which are often
# `scale-free <https://en.wikipedia.org/wiki/Scale-free_network>`__.
#
# Neighbor sampling addresses this issue by selecting a subset of the
# neighbors to perform aggregation. For instance, to compute
# :math:`\boldsymbol{h}_8^{(2)}`, you can choose two of the neighbors
# instead of all of them to aggregate, as in the following animation:
#
# |image2|
#
# You can see that this method requires far fewer nodes for message
# passing in a single minibatch.
#
# .. |image2| image:: https://data.dgl.ai/tutorial/img/bipartite.gif
#
######################################################################
# You can also notice in the animation above that the computation
# dependencies in the animation above can be described as a series of
# bipartite graphs.
# The output nodes (called *destination nodes*) are on one side and all the
# nodes necessary for inputs (called *source nodes*) are on the other side.
# The arrows indicate how the sampled neighbors propagate messages to the nodes.
# DGL calls such graphs *message flow graphs* (MFG).
#
# Note that some GNN modules, such as `SAGEConv`, need to use the destination
# nodes' features on the previous layer to compute the outputs. Without
# loss of generality, DGL always includes the destination nodes themselves
# in the source nodes.
#
######################################################################
# What’s next?
# ------------
#
# :doc:`Stochastic GNN Training for Node Classification in
# DGL <L1_large_node_classification>`
#
# Thumbnail Courtesy: Understanding graph embedding methods and their applications, Mengjia Xu
# sphinx_gallery_thumbnail_path = '_static/large_L0_neighbor_sampling_overview.png'
|
import numpy as np
import scipy as sp
import scipy.linalg as la
from scipy.linalg import svd
from scipy.linalg import eig
from numpy import matmul as mm
from scipy.linalg import expm as expm
from numpy import transpose as tp
def rank_to_normal(data, c, n):
    """Map (1-based) ranks to standard-normal quantiles.

    Args:
        data: np.array of ranks.
        c: float, offset constant of the quantile formula (3/8 gives the
            Blom transform).
        n: int, total number of observations.

    Returns:
        np.array of the same shape holding the corresponding z-scores.
    """
    # BUG FIX: `import scipy as sp` alone does not guarantee that sp.stats
    # exists (SciPy submodules must be imported explicitly on older SciPy),
    # so pull in the submodule here.
    from scipy import stats

    quantiles = (data - c) / (n - 2 * c + 1)
    return stats.norm.ppf(quantiles)


def rank_int(data, c=3.0 / 8):
    """Rank-based inverse normal transformation.

    Args:
        data: np.array of any shape; arrays with >1 dimension are flattened
            internally and reshaped back before returning.
        c: float, offset constant passed to rank_to_normal (default 3/8).

    Returns:
        np.array of the same shape as ``data`` whose values follow a standard
        normal distribution; ties receive the average rank.
    """
    from scipy import stats  # explicit submodule import (see rank_to_normal)

    original_shape = None
    if data.ndim > 1:
        original_shape = data.shape
        data = data.flatten()

    # NOTE(review): rankdata(method="average") is deterministic, so this seed
    # has no effect on the result; kept only for backward compatibility since
    # it mutates NumPy's global RNG state as a side effect.
    np.random.seed(0)

    # Rank the data; ties are averaged.
    ranks = stats.rankdata(data, method="average")
    transformed = rank_to_normal(data=ranks, c=c, n=len(ranks))

    if original_shape is not None:
        transformed = transformed.reshape(original_shape)
    return transformed
def matrix_normalization(A, version=None, c=1):
    '''
    Normalize an adjacency matrix for linear network-control analyses.

    Args:
        A: np.array (n_parcels, n_parcels)
            adjacency matrix from structural connectome
        version: str
            options: 'continuous' or 'discrete'. default=None
            string variable that determines whether A is normalized for a
            continuous-time system or a discrete-time system. If normalizing
            for a continuous-time system, the identity matrix is subtracted.
            Must be provided; any other value raises.
        c: int
            normalization constant, default=1
    Returns:
        A_norm: np.array (n_parcels, n_parcels)
            normalized adjacency matrix
    Raises:
        Exception: if version is None or not one of the supported options.
    '''
    if version == 'continuous':
        print("Normalizing A for a continuous-time system")
    elif version == 'discrete':
        print("Normalizing A for a discrete-time system")
    elif version is None:  # BUG FIX: was `version == None`
        raise Exception("Time system not specified. "
                        "Please nominate whether you are normalizing A for a continuous-time or a discrete-time system "
                        "(see function help).")
    else:
        # BUG FIX: unknown strings previously fell through silently and were
        # normalized as if discrete; fail loudly instead.
        raise Exception("Unknown version '{}': expected 'continuous' or 'discrete'.".format(version))

    # Singular value decomposition; only the largest singular value s[0]
    # is needed for the scaling below.
    u, s, vt = svd(A)
    # Matrix normalization for discrete-time systems.
    A_norm = A / (c + s[0])

    if version == 'continuous':
        # For continuous-time systems, additionally subtract the identity.
        A_norm = A_norm - np.eye(A.shape[0])
    return A_norm
def get_p_val_string(p_val):
    """Format a p-value as a matplotlib-mathtext string for plot annotations.

    Args:
        p_val: float, p-value in [0, 1].

    Returns:
        str: e.g. '$\\mathit{p}$ < 0.05' or '$\\mathit{p}$ = 0.123'.
    """
    # Raw strings avoid the invalid '\m' escape-sequence warning; the '{:}'
    # field is filled with the literal text '{p}' to produce '\mathit{p}'.
    if p_val == 0.0:
        p_str = r"-log10($\mathit{:}$)>25".format('{p}')
    elif p_val < 0.001:
        p_str = r'$\mathit{:}$ < 0.001'.format('{p}')
    elif p_val < 0.05:  # the former `p_val >= 0.001` guard was redundant here
        p_str = r'$\mathit{:}$ < 0.05'.format('{p}')
    else:
        p_str = r"$\mathit{:}$ = {:.3f}".format('{p}', p_val)
    return p_str
def expand_states(states):
    """
    This function takes a vector of integer labels designating a distinct set of binary brain states and returns
    a pair of matrices (x0_mat, xf_mat) that encode all possible pairwise transitions between those states
    (self-transitions included).
    Args:
        states: numpy array (N x 1)
            a vector of integers that designate which regions belong to which states. Note, regions cannot belong to
            more than one brain state. For example, assuming N = 12, if:
                states = np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
            then the first 4 regions belong to state 0, the next 4 to state 1, and the final 4 to state 2
    Returns:
        x0_mat: boolean array (N, n_transitions)
            boolean array of initial states. In each column, True designates regions belonging to a given initial state
        xf_mat: boolean array (N, n_transitions)
            boolean array of target states. In each column, True designates regions belonging to a given target state
    """
    unique = np.unique(states)

    # One boolean membership mask per distinct state label.
    # BUG FIX: the previous code compared against indices 0..n_states-1,
    # which produced all-False columns whenever the labels were not the
    # consecutive integers starting at 0 (e.g. states = [0, 0, 2, 2]).
    masks = [states == label for label in unique]

    # Enumerate all ordered pairs (initial, target) in initial-state-major
    # order, matching the original column layout. Collecting columns in
    # lists avoids the former O(n^2) np.append-in-a-loop pattern.
    x0_cols = []
    xf_cols = []
    for initial_mask in masks:
        for target_mask in masks:
            x0_cols.append(initial_mask)
            xf_cols.append(target_mask)

    x0_mat = np.column_stack(x0_cols)
    xf_mat = np.column_stack(xf_cols)
    return x0_mat, xf_mat
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Larry Xiao
# @Date: 2014-07-30 09:22:13
# @Last Modified by: Larry Xiao
# @Last Modified time: 2014-07-30 09:30:18
import sys
import numpy
class ReportEntry:
    # Container for the fields of one benchmark run parsed from a
    # Spark/GraphX PageRank log (see the parsing loop below).
    # NOTE(review): all fields here are *class* attributes; in particular the
    # two lists (BalanceVertex / BalanceEdge) are shared across instances.
    # The script only ever creates a single instance, so this is harmless
    # here, but be careful if reusing this class.
    DateTime = ''        # timestamp prefix of the GraphLoader log line
    TimetoLoad = ''      # graph-loading time (token 6 of the GraphLoader line)
    TimetoPartition = ''  # partitioning time (token 6 of the GraphImpl line)
    Data = ''            # input dataset path, extracted after the 'INPUT' marker
    Strategy = ''        # partition strategy (from the 'PatitionStrategy' line)
    ThreshHold = ''      # threshold parameter reported in the log
    Vertices = ''        # vertex count
    Edges = ''           # edge count
    Replications = ''    # number of replicated vertices
    NumParts = ''        # number of partitions
    Factor = ''          # replication factor = Replications / Vertices
    BalanceVertex = []   # per-partition vertex balance stats (std/avg/min/max/spread)
    BalanceEdge = []     # per-partition edge balance stats (std/avg/min/max/spread)
    Requirement = ''     # 'Requirement' value reported in the log
    TimetoExecute = ''   # execution time (token 7 of the 'execution' line)
    Result = ''          # final result value (from the 'rank' line)
    ShuffleRead = ''     # total shuffle-read bytes, scraped from the Spark UI
    ShuffleWrite = ''    # total shuffle-write bytes, scraped from the Spark UI
# Parse a Spark/GraphX PageRank run log from stdin into a single ReportEntry.
# NOTE(review): this script targets Python 2 — it imports urllib2 and relies
# on map() returning a list (numpy.array over a Python 3 map iterator would
# yield a 0-d object array).
out = ReportEntry()
# out.Factor = '1'
# out.BalanceVertex.append('a')
# print vars(out)
# pagerank output format
# from lib/Analytics.scala
for line in sys.stdin:
    # Each marker substring below identifies one kind of log line; matching is
    # purely substring-based, so ordering/overlap of markers matters.
    if "GraphLoader" in line:
        out.DateTime = line.split('INFO')[0]
        out.TimetoLoad = (line.split()[6])
    if "GraphImpl" in line:
        out.TimetoPartition = (line.split()[6])
    if "INPUT" in line:
        # Everything after the 'INPUT' marker, minus the trailing newline.
        out.Data = (line[line.index('INPUT')+5:-1])
    if "Requirement" in line:
        out.Requirement = (line.split()[-1])
    if "PatitionStrategy" in line:
        # NOTE(review): 'PatitionStrategy' (sic) matches the producer's own
        # misspelling — do not "fix" this literal.
        out.Strategy = (line.split()[-1])
    if "vertices" in line and "stat_vertices" not in line:
        out.Vertices = (line.split()[-1])
    if "GRAPHX" in line and "edges" in line and "stat_edges" not in line:
        out.Edges = (line.split()[-1])
    if "replications" in line:
        out.Replications = (line.split()[-1])
    if "execution" in line:
        out.TimetoExecute = (line.split()[7])
    if "rank" in line:
        # NOTE(review): matches any line containing 'rank'; presumably only
        # the final result line does — verify against the actual log format.
        out.Result = (line.split()[-1])
    if "partitions" in line:
        out.NumParts = (line.split()[-1])
    if "ThreshHold" in line:
        out.ThreshHold = (line.split()[-1])
    if "stat_vertices" in line:
        # Pull the numbers out of a Scala 'Array(...)' dump; taking [1::2]
        # keeps every other comma-separated element (assumed to be the
        # per-partition counts — TODO confirm against the log format).
        stringlist = line.split("Array")[1].replace("(","").replace(")","").split(",")[1::2]
        # print stringlist
        intlist = map(int, stringlist)  # Python 2: map() returns a list
        narray = numpy.array(intlist)
        # Record std / mean / min / max and the relative spread
        # (max - min) / mean as balance statistics.
        out.BalanceVertex.append("vertices")
        out.BalanceVertex.append(str(numpy.std(narray)))
        out.BalanceVertex.append(str(numpy.average(narray)))
        out.BalanceVertex.append(str(numpy.min(narray)))
        out.BalanceVertex.append(str(numpy.max(narray)))
        out.BalanceVertex.append(str((numpy.max(narray)-numpy.min(narray))/numpy.average(narray)))
    if "stat_edges" in line:
        # Same balance statistics as above, but for per-partition edges.
        stringlist = line.split("Array")[1].replace("(","").replace(")","").split(",")[1::2]
        # print stringlist
        intlist = map(int, stringlist)
        narray = numpy.array(intlist)
        out.BalanceEdge.append("edges")
        out.BalanceEdge.append(str(numpy.std(narray)))
        out.BalanceEdge.append(str(numpy.average(narray)))
        out.BalanceEdge.append(str(numpy.min(narray)))
        out.BalanceEdge.append(str(numpy.max(narray)))
        out.BalanceEdge.append(str((numpy.max(narray)-numpy.min(narray))/numpy.average(narray)))
    if "SparkDeploySchedulerBackend: Connected to Spark cluster with app ID" in line:
        # Scrape the Spark web UI for shuffle-read/-write totals; best-effort
        # only (the UI may be unreachable), so all failures are swallowed.
        try:
            import urllib2
            # get correct ID, and omit newline \n
            appID = line.split('ID ')[1][:-1]
            url = 'http://brick0:8080/history/' + appID + '/executors/'
            response = urllib2.urlopen(url)
            html = response.read()
            import re
            # Every executor row carries 6 sortable cells; columns 4 and 5
            # (0-based, per row) are assumed to be shuffle read/write — TODO
            # confirm against the Spark UI version in use.
            matchObj = re.findall(r'<td sorttable_customkey="(.*?)">', html)
            # shuffle read
            out.ShuffleRead = str(sum(map(int,matchObj[4::6])))
            # shuffle write
            out.ShuffleWrite = str(sum(map(int,matchObj[5::6])))
        except Exception:
            pass
# Replication factor; best-effort since either field may still be ''.
try:
    out.Factor = str( float(out.Replications) / float(out.Vertices) )
except Exception:
    pass
#print out.DateTime
#print out.TimetoLoad
#print out.TimetoPartition
#print out.Data
#print out.Strategy
#print out.ThreshHold
#print out.Vertices
#print out.Edges
#print out.Replications
#print out.NumParts
#print out.Factor
#for i in out.BalanceVertex:
# print i
#for i in out.BalanceEdge:
# print i
#print out.Requirement
#print out.TimetoExecute
#print out.Result
#sys.stdout.write( out.DateTime + ',' )
#sys.stdout.write( out.Strategy + ',' )
#sys.stdout.write( out.TimetoLoad + ',' )
#sys.stdout.write( out.TimetoPartition + ',' )
#sys.stdout.write( out.Data + ',' )
#sys.stdout.write( out.ThreshHold + ',' )
#sys.stdout.write( out.Vertices + ',' )
#sys.stdout.write( out.Edges + ',' )
#sys.stdout.write( out.Replications + ',' )
#sys.stdout.write( out.NumParts + ',' )
#sys.stdout.write( out.Factor + ',' )
#for i in out.BalanceVertex:
# sys.stdout.write( i + ',' )
#for i in out.BalanceEdge:
# sys.stdout.write( i + ',' )
#sys.stdout.write( out.Requirement + ',' )
#sys.stdout.write( out.TimetoExecute + ',' )
#sys.stdout.write( out.Result + ',' )
#sys.stdout.write( out.ShuffleRead + ',' )
#sys.stdout.write( out.ShuffleWrite + '\n' )
# Emit one tab-separated record for this run (same field order and
# separators as before: every field tab-terminated, record newline-ended).
fields = [
    out.DateTime,
    out.Strategy,
    out.TimetoLoad,
    out.TimetoPartition,
    out.Data,
    out.ThreshHold,
    out.Vertices,
    out.Edges,
    out.Replications,
    out.NumParts,
    out.Factor,
]
fields.extend(out.BalanceVertex)
fields.extend(out.BalanceEdge)
fields.extend([out.Requirement, out.TimetoExecute, out.Result, out.ShuffleRead, out.ShuffleWrite])
sys.stdout.write('\t'.join(fields) + '\n')
|
import pytest
import numpy as np
from numba import types
from numba.typed import List
from hmmkay.utils import (
_check_array_sums_to_1,
make_proba_matrices,
make_observation_sequences,
check_sequences,
)
def test_make_observation_sequences():
    """Shape, dtype and value checks for make_observation_sequences."""
    common = dict(n_seq=10, n_observable_states=3, n_obs_min=12, random_state=0)

    # Constant-length case (n_obs_max=None): a 2d int32 ndarray is expected.
    sequences = make_observation_sequences(n_obs_max=None, **common)
    assert isinstance(sequences, np.ndarray)
    assert sequences.dtype == np.int32
    assert sequences.shape == (10, 12)
    assert np.all(np.unique(sequences) == np.arange(3))

    # Variable-length case: a numba typed List of 1d int32 arrays.
    sequences = make_observation_sequences(n_obs_max=20, **common)
    assert isinstance(sequences, List)
    for seq in sequences:
        assert isinstance(seq, np.ndarray)
        assert seq.dtype == np.int32
        assert seq.ndim == 1
        assert 12 <= seq.shape[0] < 20
        assert np.all(np.unique(seq) == np.arange(3))
def test_make_proba_matrices():
    """Every row of pi, A and B must be a valid probability distribution."""
    n_hidden_states = 10
    pi, A, B = make_proba_matrices(n_hidden_states=n_hidden_states, random_state=0)
    _check_array_sums_to_1(pi)
    for state in range(n_hidden_states):
        for matrix in (A, B):
            _check_array_sums_to_1(matrix[state])
def _make_typed_list():
    """Build a numba typed List holding two int32 arrays (test helper)."""
    typed = List.empty_list(types.int32[:])
    for values in ([1, 2, 3], [0, 1, 3, 5]):
        typed.append(np.array(values, dtype=np.int32))
    return typed
@pytest.mark.parametrize(
    "sequences, expected_type, expected_longest_length",
    [
        ([[1, 2], [1, 2, 3]], List, 3),
        (_make_typed_list(), List, 4),
        (np.arange(20).reshape(4, 5), np.ndarray, 5),
    ],
)
def test_check_sequences(sequences, expected_type, expected_longest_length):
    """check_sequences must convert the input and report the longest length.

    BUG FIX: this function was previously named ``check_sequences``, which
    shadowed the imported ``hmmkay.utils.check_sequences`` at module level
    (turning the call below into a self-call) and, lacking the ``test_``
    prefix, was never collected by pytest.
    """
    sequences, longest_length = check_sequences(sequences, return_longest_length=True)
    assert isinstance(sequences, expected_type)
    assert longest_length == expected_longest_length
|
import pandas as pd
import numpy as np
from fast_ml.utilities import rare_encoding
class FeatureEngineering_Categorical:
    """Encoder for categorical variables.

    Supports label/count/frequency encodings plus several target-based
    encodings. Learn mappings with :meth:`fit`, apply them with
    :meth:`transform`.
    """

    def __init__(self, model=None, method='label', drop_last=True, n_frequent=None):
        '''
        Parameters:
        -----------
        model : str, default None
            Most encodings work for both classification and regression; only
            'target_prob_ratio' and 'target_woe' additionally require
            model='classification' (or 'clf').
        method : str, default 'label'
            'one-hot' or 'onehot' : one hot encoding
            'integer' or 'label'  : converts categories into integer codes
            'count'               : converts categories into occurrence counts
            'freq' or 'frequency' : converts categories into occurrence frequencies
            'ordered_label'       : integer codes in descending order of occurrence
            Target encoding methods:
            'target_ordered'    : integer codes in descending order of mean target value
            'target_mean'       : converts categories into the mean target value
            'target_prob_ratio' : classification only; P(target=1)/P(target=0) per category
            'target_woe'        : classification only; weight of evidence per category
        drop_last : bool, default True
            For one-hot encoding, drop the last category.
        n_frequent : int, default None
            For one-hot encoding, keep only the top n categories.
            NOTE(review): accepted but not used anywhere yet — TODO implement or remove.
        '''
        self.method = method
        self.drop_last = drop_last
        self.n_frequent = n_frequent
        self.model = model

    def fit(self, df, variables, target=None):
        '''
        Learn the per-variable encoding mappings and store them in
        ``self.param_dict_``.

        Parameters:
        -----------
        df : pd.DataFrame, training dataset
        variables : list of categorical variable names
        target : str, target column name (required for target encodings)

        Returns:
        --------
        None
        '''
        self.param_dict_ = {}

        if self.method == 'one-hot' or self.method == 'onehot':
            for var in variables:
                cats = list(df[var].unique())
                # BUG FIX: was `if drop_last:` (a NameError) — the flag lives on self.
                if self.drop_last:
                    self.param_dict_[var] = cats[0:-1]
                else:
                    self.param_dict_[var] = cats

        if self.method == 'integer' or self.method == 'label':
            for var in variables:
                self.param_dict_[var] = {cat: ix for ix, cat in enumerate(df[var].unique())}

        if self.method == 'count':
            for var in variables:
                self.param_dict_[var] = df[var].value_counts().to_dict()

        if self.method == 'freq' or self.method == 'frequency':
            for var in variables:
                self.param_dict_[var] = (df[var].value_counts() / len(df[var])).to_dict()

        if self.method == 'ordered_label':
            for var in variables:
                # value_counts() sorts descending, so codes follow frequency order.
                s = df[var].value_counts()
                self.param_dict_[var] = {cat: ix for ix, cat in enumerate(s.index)}

        # ---- Target encodings ----
        if self.method == 'target_ordered':
            for var in variables:
                # BUG FIX: sort by mean target (descending) as documented;
                # previously codes followed plain groupby (category) order.
                s = df.groupby(var)[target].mean().sort_values(ascending=False)
                self.param_dict_[var] = {cat: ix for ix, cat in enumerate(s.index)}

        if self.method == 'target_mean':
            for var in variables:
                # BUG FIX: map category -> mean target. The old comprehension
                # `{cat: ix for ix, cat in s.items()}` inverted keys and values
                # (mean -> category), so transform() produced only NaNs.
                self.param_dict_[var] = df.groupby(var)[target].mean().to_dict()

        if self.model == 'classification' or self.model == 'clf':
            if self.method == 'target_prob_ratio':
                for var in variables:
                    # BUG FIX: was `ds.groupby` (a NameError) — the frame is `df`.
                    prob_df = pd.DataFrame(df.groupby(var)[target].mean())
                    prob_df.columns = ['target_1']
                    prob_df['target_0'] = 1 - prob_df['target_1']
                    prob_df['ratio'] = prob_df['target_1'] / prob_df['target_0']
                    self.param_dict_[var] = prob_df['ratio'].to_dict()

            if self.method == 'target_woe':
                for var in variables:
                    woe_df = pd.DataFrame(pd.crosstab(df[var], df[target], normalize='columns').mul(100))
                    woe_df.rename(columns={0: "Target_0_Per", 1: "Target_1_Per"}, inplace=True)
                    woe_df['WOE'] = np.log(woe_df['Target_1_Per'] / woe_df['Target_0_Per'])
                    self.param_dict_[var] = woe_df['WOE'].to_dict()

        return None

    def transform(self, df):
        '''
        Apply the mappings learned in fit().

        Parameters:
        -----------
        df : pd.DataFrame to encode (modified in place and returned)

        Returns:
        --------
        pd.DataFrame with all fitted variables encoded. For one-hot, indicator
        columns named '<var>_<category>' are added; as before, the original
        column is left in place.
        '''
        if self.method == 'one-hot' or self.method == 'onehot':
            # BUG FIX: iterate over .items(); iterating the dict directly
            # yields only the keys, so the `var, mapper` unpacking failed.
            for var, mapper in self.param_dict_.items():
                for category in mapper:
                    df[str(var) + '_' + str(category)] = np.where(df[var] == category, 1, 0)
        else:
            for var, mapper in self.param_dict_.items():
                df[var] = df[var].map(mapper)
        return df
|
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk
from .uiutils import ChildFinder
import sys
@Gtk.Template(filename=sys.path[0] + "/dungeon/ui/keypadwidget.glade")
class KeypadWidget(Gtk.Box, ChildFinder):
    """Numeric keypad widget backed by a Glade template.

    Digits are appended to the 'value' label up to ``length`` characters;
    backspace/escape (via buttons or keyboard) edit or clear the entry.
    """

    __gtype_name__ = "KeypadWidget"

    def __init__(self, length=5, default=None):
        """
        :param length: maximum number of digits accepted.
        :param default: initial label text; empty when None.
        """
        Gtk.Box.__init__(self)
        self.value = self.find_child('value')
        self.value.set_label(default if default is not None else '')
        self.length = length
        self.show_all()

    def result(self):
        """Return the current entry as an int.

        NOTE(review): raises ValueError when the entry is empty — callers
        should only read this after input has been entered.
        """
        return int(self.value.get_label())

    @Gtk.Template.Callback()
    def onClick(self, caller):
        """Append the clicked button's digit, dropping a leading zero."""
        lbl = self.value.get_label()
        if lbl.startswith('0'):
            lbl = lbl[1:]
        if len(lbl) < self.length:
            lbl += caller.get_label()
        self.value.set_label(lbl)

    @Gtk.Template.Callback()
    def onBackspace(self, caller):
        """Remove the last digit, if any."""
        lbl = self.value.get_label()
        if lbl:
            lbl = lbl[:-1]
        self.value.set_label(lbl)

    @Gtk.Template.Callback()
    def onClear(self, caller):
        """Reset the entry to empty."""
        self.value.set_label("")

    @Gtk.Template.Callback()
    def onKeyPress(self, caller, event):
        """Handle keyboard input: backspace, escape, or a digit."""
        # BUG FIX: modifier-only key presses deliver an empty event.string;
        # indexing [0] previously raised IndexError.
        if not event.string:
            return
        code = ord(event.string[0])
        if code == 8:       # backspace
            self.onBackspace(caller)
        elif code == 27:    # escape
            self.onClear(caller)
        elif '0' <= event.string[0] <= '9':
            # BUG FIX: compare the first character, not the whole string —
            # the old lexicographic range test misclassified multi-char input.
            lbl = self.value.get_label()
            if len(lbl) < self.length:
                lbl += event.string
            self.value.set_label(lbl)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.