import pymel.core as pm
import pulse.joints
from pulse.buildItems import BuildAction, BuildActionError
class CleanupJointsAction(BuildAction):
def validate(self):
if not self.rootJoint:
raise BuildActionError('rootJoint must be set')
def run(self):
if self.removeEndJoints:
endJoints = pulse.joints.getEndJoints(self.rootJoint)
pm.delete(endJoints)
if self.disableScaleCompensate:
allJoints = self.rootJoint.listRelatives(ad=True, typ='joint')
for joint in allJoints:
joint.segmentScaleCompensate.set(False)
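# Hypothetical usage sketch ('root_jnt' and the direct attribute assignment below
# are illustrative only, not Pulse's documented API for configuring actions):
# action = CleanupJointsAction()
# action.rootJoint = pm.PyNode('root_jnt')
# action.removeEndJoints = True
# action.disableScaleCompensate = True
# action.validate()
# action.run()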
|
##################################################################
#
# Imports
#
from enum import Enum
from coventreiya.morphology.affix.infix import Consonant as Cons_affix_abc
from coventreiya.morphology.affix.infix import Cons_infix_matcher
from coventreiya.morphology.affix.infix import Vowel as Vol_affix_abc
from coventreiya.morphology.affix.abc import set_abc as affix_set
from coventreiya.phonotactics.onsets.ver_1_5_7 import ver_1_5_7 as onset_match
from coventreiya.phonotactics.nucleus.ver_1_5_7 import ver_1_5_7 as nucleus_match
from coventreiya.phonotactics.codas.ver_1_5_7 import ver_1_5_7 as coda_match
from coventreiya.utils.fsm import fsm_state
from coventreiya.utils.lists import is_item_list
from coventreiya.utils.gen import gen_list, gen_str1
from coventreiya.utils.gen import gen_actual, gen_unique
##################################################################
#
# Coventreiya Morphology - Affix - Consonant Infixes' Base Class
#
class Consonants(Cons_affix_abc):
def __init__(self, name="", value=None, cons_=None, is_onset=False,
has_neg=False, has_def=True, has_alt=False, has_null=True):
if bool(is_onset):
match_ = onset_match()
else:
match_ = coda_match()
super().__init__(name, value, cons_, match_, is_onset,
has_neg, has_def, has_alt, has_null,
1, 3, 5, None)
pass
#def gen_Consonants_lambda( is_onset=False, has_neg=False, has_def=True,
# has_alt=False, has_null=True ):
# return lambda str_: Consonants( str_, is_onset, has_neg, has_def,
# has_alt, has_null)
##################################################################
#
# Coventreiya Morphology - Affix - Consonant Infixes' Actual Classes
#
class Animacy(affix_set):
class enum_(int, Enum):
#min_ = 0
Inanimate_ = 0
#Inanimate_Energy_ = 1 # is this needed?
# special [sub]categories of the possibly Inanimate
Spatial_Location_ = 1
Temporal_Location_ = 2 # aka an Event
Concept_ = 3
# the Full Categories of the Animate
Viroid_ = 4
Protists_ = 5
Microbes_ = 6
Fungi_ = 7
Plant_ = 8
Animalistic_ = 9
#the compounds
Spatial_Temporal_Location_ = 10
# Inanimate_:
Inanimate_Spatial_Location_ = 11
Inanimate_Temporal_Location_ = 12
Inanimate_Spatial_Temporal_Location_ = 13
# Concept_:
Concept_Spatial_Location_ = 14
Concept_Temporal_Location_ = 15
Concept_Spatial_Temporal_Location_ = 16
# Viroid_:
Viroid_Spatial_Location_ = 17
Viroid_Temporal_Location_ = 18
Viroid_Spatial_Temporal_Location_ = 19
# Protists_:
Protists_Spatial_Location_ = 20
Protists_Temporal_Location_ = 21
Protists_Spatial_Temporal_Location_ = 22
# Microbes_:
Microbes_Spatial_Location_ = 23
Microbes_Temporal_Location_ = 24
Microbes_Spatial_Temporal_Location_ = 25
# Fungi_:
Fungi_Spatial_Location_ = 26
Fungi_Temporal_Location_ = 27
Fungi_Spatial_Temporal_Location_ = 28
# Plant_:
Plant_Spatial_Location_ = 29
Plant_Temporal_Location_ = 30
Plant_Spatial_Temporal_Location_ = 31
# Animalistic_:
Animalistic_Spatial_Location_ = 32
Animalistic_Temporal_Location_ = 33
Animalistic_Spatial_Temporal_Location_ = 34
## :
#Spatial_Location_ =
#Temporal_Location_ =
#Spatial_Temporal_Location_ =
#max_ = 10
def __init__(self):
is_onset=False
has_neg=False
has_def=True
has_alt=False
has_null=False
name_ = "Animacy"
func_ = lambda str_, value: Consonants( name_, value, str_,
is_onset, has_neg,
has_def, has_alt,
has_null )
super().__init__( Consonants, name_, has_neg, has_def,
func_( [ "[θ]" ], self.enum_.Inanimate_ ),
self.enum_.Inanimate_,
None, 1,3,5, None )
        parent_setitem_ = super().__setitem__  # superclass setter; this class's own __setitem__ is a no-op guard
        func2_ = lambda str_, value: parent_setitem_( value, func_( str_, value ) )
# begin constructing the set's contents here:
#func2_( [ "[ɡ]" ], self.enum_.Inanimate_Energy_ )
func2_( [ "[ɫ]" ], self.enum_.Spatial_Location_ )
func2_( [ "[t]" ], self.enum_.Temporal_Location_ )
func2_( [ "[d]" ], self.enum_.Concept_ )
func2_( [ "[ʃ]" ], self.enum_.Viroid_ )
func2_( [ "[ɹ̠]" ], self.enum_.Protists_ )
func2_( [ "[k]" ], self.enum_.Microbes_ )
func2_( [ "[f]" ], self.enum_.Fungi_ )
func2_( [ "[p]" ], self.enum_.Plant_ )
func2_( [ "[x]" ], self.enum_.Animalistic_ )
#the compounds
func2_( [ "[ɫ]", "[t]" ], self.enum_.Spatial_Temporal_Location_ )
# Inanimate_:
func2_( [ "[ɫ]", "[θ]" ], self.enum_.Inanimate_Spatial_Location_ )
func2_( [ "[θ]", "[t]" ], self.enum_.Inanimate_Temporal_Location_ )
func2_( [ "[ɫ]", "[θ]", "[t]" ], self.enum_.Inanimate_Spatial_Temporal_Location_ )
# Concept_:
func2_( [ "[ɫ]", "[d]" ], self.enum_.Concept_Spatial_Location_ )
func2_( [ "[d]", "[t]" ], self.enum_.Concept_Temporal_Location_ )
func2_( [ "[ɫ]", "[d]", "[t]" ], self.enum_.Concept_Spatial_Temporal_Location_ )
# Viroid_:
func2_( [ "[ɫ]", "[ʃ]" ], self.enum_.Viroid_Spatial_Location_ )
func2_( [ "[ʃ]", "[t]" ], self.enum_.Viroid_Temporal_Location_ )
func2_( [ "[ɫ]", "[ʃ]", "[t]" ], self.enum_.Viroid_Spatial_Temporal_Location_ )
# Protists_:
func2_( [ "[ɹ̠]","[ɫ]" ], self.enum_.Protists_Spatial_Location_ )
func2_( [ "[ɹ̠]", "[t]" ], self.enum_.Protists_Temporal_Location_ )
func2_( [ "[ɹ̠]","[ɫ]", "[t]" ], self.enum_.Protists_Spatial_Temporal_Location_ )
# Microbes_:
func2_( [ "[ɫ]", "[k]" ], self.enum_.Microbes_Spatial_Location_ )
func2_( [ "[k]", "[t]" ], self.enum_.Microbes_Temporal_Location_ )
func2_( [ "[ɫ]", "[k]", "[t]" ], self.enum_.Microbes_Spatial_Temporal_Location_ )
# Fungi_:
func2_( [ "[ɫ]", "[f]" ], self.enum_.Fungi_Spatial_Location_ )
func2_( [ "[f]", "[t]" ], self.enum_.Fungi_Temporal_Location_ )
func2_( [ "[ɫ]", "[f]", "[t]" ], self.enum_.Fungi_Spatial_Temporal_Location_ )
# Plant_:
func2_( [ "[ɫ]", "[p]" ], self.enum_.Plant_Spatial_Location_ )
func2_( [ "[p]", "[t]" ], self.enum_.Plant_Temporal_Location_ )
func2_( [ "[ɫ]", "[p]", "[t]" ], self.enum_.Plant_Spatial_Temporal_Location_ )
# Animalistic_:
func2_( [ "[ɫ]", "[x]" ], self.enum_.Animalistic_Spatial_Location_ )
func2_( [ "[x]", "[t]" ], self.enum_.Animalistic_Temporal_Location_ )
func2_( [ "[ɫ]", "[x]", "[t]" ], self.enum_.Animalistic_Spatial_Temporal_Location_ )
#func2_( , self.enum_. )
#func2_( , self.enum_. )
#func2_( , self.enum_. )
def __setitem__(self, key, value):
"""
Completely disregard *value*
- this method is supposed to hide the super-classes method
and prevent the adding of values...
"""
        return super().__getitem__(key)
def add(self, index, item=None, alt_item=None):
"""
Completely disregard *item* and *alt_item*
- this method is supposed to hide the super-classes method
and prevent the adding of values...
"""
        return super().__getitem__(index)
pass
class Sentience(affix_set):
class enum_(int, Enum):
#min_ = 0
NotSentient_ = 0
Sentient_ = 1
Supernatural_ = 2
Deity_ = 3
#max_ = 3
def __init__(self):
is_onset=False
has_neg=False
has_def=True
has_alt=True
has_null=True
name_ = "Sentience"
func_ = lambda str_, value, has_alt_=has_alt: Consonants( name_,
value, str_,
is_onset,
has_neg, has_def,
has_alt_,
has_null)
super().__init__( Consonants, name_, has_neg, has_def,
func_( [ "" ] , self.enum_.NotSentient_, False ),
self.enum_.NotSentient_,
None, 1,3,5, None)
        parent_add_ = super().add  # superclass add(); this class's own add() is a no-op guard
        func2_ = lambda value, str_, alt_: parent_add_( value,
                                                        func_( str_, value ),
                                                        func_( alt_, value ) )
func2_( self.enum_.Sentient_, [ "[w]" ], [ "[j]" ] )
func2_( self.enum_.Supernatural_, [ "[v]" ], [ "[ʕ]" ] )
func2_( self.enum_.Deity_, [ "[ʔ]" ], [ "[l]" ] )
def __setitem__(self, key, value):
"""
Completely disregard *value*
- this method is supposed to hide the super-classes method
and prevent the adding of values...
"""
        return super().__getitem__(key)
def add(self, index, item=None, alt_item=None):
"""
Completely disregard *item* and *alt_item*
- this method is supposed to hide the super-classes method
and prevent the adding of values...
"""
        return super().__getitem__(index)
pass
class Lexical_Category(affix_set):
class enum_(int, Enum):
#min_ = 0
#NotApplicable_ = 0
Noun_ = 0
Adjective_ = 1
Verb_ = 2
Adverb_ = 3
Gerund_ = 4
#Number_ = 5
#Interjection_ = 6
# add here a number of Number_ Compounds of the above... neah!
#max_ = 5
def __init__(self):
is_onset=False
has_neg=False
has_def=True
has_alt=False
has_null=True
name_ = "Lexical_Category"
func_ = lambda str_, value: Consonants( name_, value, str_,
is_onset, has_neg,
has_def, has_alt,
has_null)
super().__init__( Consonants, name_, has_neg, has_def,
func_( [ "[n]" ], self.enum_.Noun_ ),
self.enum_.Noun_,
None, 1,3,5, None)
        parent_setitem_ = super().__setitem__  # superclass setter; this class's own __setitem__ is a no-op guard
        func2_ = lambda value, str_: parent_setitem_( value, func_( str_, value ) )
func2_( self.enum_.Adjective_, [ "[ŋ]" ] )
func2_( self.enum_.Verb_, [ "[v]" ] )
func2_( self.enum_.Adverb_, [ "[ʒ]" ] )
func2_( self.enum_.Gerund_, [ "[m]" ] )
#func2_( self.enum_.Number_, [ "[s]" ] )
# add here a number of Number_ Compounds of the above... neah!
#func2_( self.enum_.Interjection_, [ "[j]" ] )
def __setitem__(self, key, value):
"""
Completely disregard *value*
- this method is supposed to hide the super-classes method
and prevent the adding of values...
"""
        return super().__getitem__(key)
def add(self, index, item=None, alt_item=None):
"""
Completely disregard *item* and *alt_item*
- this method is supposed to hide the super-classes method
and prevent the adding of values...
"""
        return super().__getitem__(index)
pass
def gen_Consonant_Infixes():
return [ Lexical_Category(), Animacy(), Sentience() ]
class __Cons_Infix_matcher_1_3_5(Cons_infix_matcher):
def __init__(self):
min_length = 1
max_length = 3
super().__init__(min_length, max_length, 1, 3, 5, None)
def matcher(self):
"""Returns the Match Data Object."""
pass
def matcher_func(self, inp_, mat_):
"""
The Function that DOES the Matching based
on some Input (*inp_*) and some Match Data Object...
"""
pass
def categories(self):
''' Generate the Categories Lists. '''
return gen_Consonant_Infixes()
def replacment_map(self):
''' Generate the Replacement Map. '''
cat_ = self.categories()
return { 0 : cat_[0],
1 : cat_[1],
2 : cat_[2] }
pass
def gen_all_Consonant_sets():
"""
Returns a list of lists.
Each member list will contain
one possible variant of Consonant Infix Sets.
"""
cat_ = gen_Consonant_Infixes()
repl_map = { }
for i in range(0, len(cat_)):
repl_map[i] = cat_[i]
    res = [ [0,],
            [0,1,],
            [0,1,2,], ]
return gen_actual(res, repl_map)
##################################################################
#
# Coventreiya Morphology - Affix - Vowel Infixes' Base Class
#
class Vowels(Vol_affix_abc):
def __init__( self, name="", value=None, vowel_=None, has_neg=False, has_def=True,
has_alt=False, has_null=True ):
        matcher = nucleus_match()
super().__init__(name,value,vowel_,matcher,has_neg,
has_def,has_alt,has_null,
1, 3, 5, None)
pass
#def gen_Vowels_lambda( has_neg=False, has_def=True, has_alt=False,
# has_null=True ):
# return lambda str_: Vowels( str_, has_neg, has_def, has_alt,
# has_null )
##################################################################
#
# Coventreiya Morphology - Affix - Vowel Infixes' Actual Classes
#
class system1(affix_set):
def __init__(self, name_="", enum_cls=None):
        if (issubclass(enum_cls, int) and issubclass(enum_cls, Enum)):
self.__enum = enum_cls
else:
raise TypeError()
has_neg=False
has_def=True
has_alt=False
has_null=False
d = dict({ 0 : [ "[ä]" ],
1 : [ "[e]" ],
2 : [ "[ʊ]" ],
3 : [ "[ɛ]" ] })
func_ = lambda val_: Vowels( name_, val_, d[val_.value],
has_neg, has_def, has_alt, has_null )
super().__init__( Vowels, name_, has_neg, has_def,
func_( enum_cls(0) ),
enum_cls(0).value, None, 1,3,5,None)
for i in enum_cls:
if i.value != 0:
                super().__setitem__( i, func_( i ) )
def __setitem__(self, key, value):
"""
Completely disregard *value*
- this method is supposed to hide the super-classes method
and prevent the adding of values...
"""
        return super().__getitem__(key)
def add(self, index, item=None, alt_item=None):
"""
Completely disregard *item* and *alt_item*
- this method is supposed to hide the super-classes method
and prevent the adding of values...
"""
        return super().__getitem__(index)
pass
class system2(affix_set):
def __init__(self, name_="", enum_cls=None):
if (issubclass(enum_cls, int) and issubclass(enum_cls, Enum)):
self.__enum = enum_cls
else:
raise TypeError()
has_neg=False
has_def=True
has_alt=False
has_null=True
d = dict({ 0 : [ "" ],
1 : [ "[i]" ],
2 : [ "[u]" ],
3 : [ "[o̞]" ],
4 : [ "[ɔ]" ],
5 : [ "[æ]" ],
6 : [ "[œ]" ] })
func_ = lambda val_, has_alt_=has_alt: Vowels( name_, val_,
d[val_.value],
has_neg, has_def,
has_alt_,
has_null )
func2_ = lambda str_, val_, has_alt_=has_alt: Vowels( name_,
val_, str_,
has_neg, has_def,
has_alt_,
has_null )
super().__init__( Vowels, name_, has_neg, has_def,
func_( enum_cls(0), True ),
enum_cls(0).value,
func2_( [ "[ɪ]" ], enum_cls(0), True ),
1,3,5,None)
for i in enum_cls:
if i.value != 0:
                super().__setitem__( i, func_( i ) )
def __setitem__(self, key, value):
"""
Completely disregard *value*
- this method is supposed to hide the super-classes method
and prevent the adding of values...
"""
        return super().__getitem__(key)
def add(self, index, item=None, alt_item=None):
"""
Completely disregard *item* and *alt_item*
- this method is supposed to hide the super-classes method
and prevent the adding of values...
"""
        return super().__getitem__(index)
pass
########################################################################
#
# A pair of Utility classes..
#
class all_of_system2(system2):
class enum_(int,Enum):
Zeroth_ = 0
First_ = 1
Second_ = 2
Third_ = 3
Fourth_ = 4
Fifth_ = 5
Sixth_ = 6
def __init__(self, name_=""):
super().__init__( name_, self.enum_ )
pass
class all_of_system1(system1):
class enum_(int,Enum):
Zeroth_ = 0
First_ = 1
Second_ = 2
Third_ = 3
def __init__(self, name_=""):
super().__init__( name_, self.enum_ )
pass
########################################################################
#
# The actual Vowel Infix classes..
#
class Modality(system2):
class enum_(int, Enum):
#min_ = 0
Indicative_ = 0
Declarative_ = 1
Interrogative_ = 2
Jussive_ = 3
#NotApplicable_ = 4 # empty slot!!!
Imperative_ = 5
#max_ = 5
def __init__(self):
super().__init__( "Modality", self.enum_ )
pass
class Abstractivity(system1):
class enum_(int, Enum):
#min_ = 0
Concrete_ = 0
Virtual_ = 1
Abstract_ = 2
#max_ = 2
def __init__(self):
super().__init__( "Abstractivity", self.enum_ )
pass
def gen_Base_Vowel_Infixes():
return [ Abstractivity(), Modality() ] # 0 , 1
class Verb_Transitivity(system2):
class enum_(int, Enum):
#min_ = 0
Intransitive_ = 0
Transitive_ = 1
Ditransitive_ = 2
Tritransitive_ = 3
#max_ = 3
def __init__(self):
super().__init__( "Verb.Transitivity", self.enum_ )
pass
class Verb_Type(system1):
class enum_(int, Enum):
#min_ = 0
Base_ = 0
Derived1_ = 1
Derived2_ = 2
#max_ = 2
def __init__(self):
super().__init__( "Verb.Type", self.enum_ )
pass
class Verb_Valency(system2):
class enum_(int, Enum):
#min_ = 0
Normative_ = 0
Reciprocal_ = 1
ToDoCausative_ = 2
ToBeCausative_ = 3
#max_ = 1
def __init__(self):
super().__init__( "Verb.Valency", self.enum_ )
pass
class Verb_Voice(system1):
class enum_(int, Enum):
#min_ = 0
Active_ = 0
Passive_ = 1
Reflexive_ = 2
#max_ = 2
def __init__(self):
super().__init__( "Verb.Voice", self.enum_ )
pass
def gen_Verb_Vowel_Infixes():
return [ Verb_Transitivity(), Verb_Type(), # 0 , 1
Verb_Valency(), Verb_Voice() ] # 2, 3
def gen_all_Verb_Vowel_sets():
"""
Returns a list of lists.
Each member list will contain
one possible variant of Verb Vowel Infix Sets.
"""
cat_ = gen_Base_Vowel_Infixes()
cat_.extend(gen_Verb_Vowel_Infixes())
repl_map = {}
for i in range(0,len(cat_)):
repl_map[i] = cat_[i]
res_ = list()
# for the 1 syllable length stems:
res_.append([0,])
res_.append([1,])
res_.append([0,1,])
# for the 2 syllable length stems:
res_.append([0,4,5,])
res_.append([1,4,5,])
res_.append([0,1,4,5,])
# for the rest:
res_.append([0,1,2,3,4,5,])
    return gen_actual(res_, repl_map)
class Noun_Countability(system2):
class enum_(int, Enum):
#min_ = 0
NonCountable_ = 0
Countable_ = 1
Mass_ = 2
#max_ = 1
def __init__(self):
super().__init__( "Noun.Countability", self.enum_ )
pass
class Noun_Individuality(system1):
class enum_(int, Enum):
#min_ = 0
Indivual_ = 0
Collective_ = 1
#max_ = 1
def __init__(self):
super().__init__( "Noun.Individuality", self.enum_ )
pass
class Noun_Type(system2):
class enum_(int, Enum):
#min_ = 0
Common_ = 0
Derived1_ = 1
Derived2_ = 2
#max_ = 1
def __init__(self):
super().__init__( "Noun.Type", self.enum_ )
pass
class Noun_Dependency(system1):
class enum_(int, Enum):
#min_ = 0
Independent_ = 0
Dependent_ = 1
ConstructState_ = 2
#max_ = 1
def __init__(self):
super().__init__( "Noun.Dependency", self.enum_ )
pass
def gen_Noun_Vowel_Infixes():
return [ Noun_Type(), Noun_Dependency(), # 0 , 1
Noun_Countability(), Noun_Individuality() ] # 2, 3
def __gen_all_Noun_Vowel_sets():
"""
Returns a list of lists.
Each member list will contain
one possible variant of Noun Vowel Infix Sets.
"""
cat_ = gen_Base_Vowel_Infixes()
cat_.extend(gen_Noun_Vowel_Infixes())
repl_map = {}
for i in range(0,len(cat_)):
repl_map[i] = cat_[i]
res_ = list()
# for the 1 syllable length stems:
res_.append([0,])
res_.append([1,])
res_.append([0,1,])
# for the 2 syllable length stems:
res_.append([0,4,5,])
res_.append([1,4,5,])
res_.append([0,1,4,5,])
# for the rest:
res_.append([0,1,2,3,4,5,])
    return gen_actual(res_, repl_map)
|
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os.path import join, exists
from functools import partial
from os import makedirs, stat
import pandas as pd
from h5py import File
from qiita_client import ArtifactInfo
from qiita_files.demux import to_hdf5
def get_artifact_information(qclient, artifact_id, out_dir):
"""Retrieves the artifact information for running split libraries
Parameters
----------
qclient : tgp.qiita_client.QiitaClient
The Qiita server client
artifact_id : str
The artifact id
out_dir : str
The output directory
Returns
-------
dict, str, str
The artifact filepaths keyed by type
The artifact Qiime-compliant mapping file path
The artifact type
"""
# Get the artifact filepath information
artifact_info = qclient.get("/qiita_db/artifacts/%s/" % artifact_id)
fps = artifact_info['files']
# Get the artifact type
artifact_type = artifact_info['type']
# Get the artifact metadata
prep_info = qclient.get('/qiita_db/prep_template/%s/'
% artifact_info['prep_information'][0])
df = pd.read_csv(prep_info['prep-file'], sep='\t', dtype='str',
na_values=[], keep_default_na=False)
df.set_index('sample_name', inplace=True)
# rename columns to match QIIME 1 required columns
columns = df.columns.values.tolist()
sort_columns = []
if 'barcode' in columns:
df.rename(columns={'barcode': 'BarcodeSequence'}, inplace=True)
sort_columns.append('BarcodeSequence')
if 'primer' in columns:
df.rename(columns={'primer': 'LinkerPrimerSequence'}, inplace=True)
sort_columns.append('LinkerPrimerSequence')
if 'reverselinkerprimer' in columns:
df.rename(columns={'reverselinkerprimer': 'ReverseLinkerPrimer'},
inplace=True)
sort_columns.append('ReverseLinkerPrimer')
# by design the prep info file doesn't have a Description column so we can
# fill without checking
df['Description'] = 'XXQIITAXX'
# sorting columns to be a valid "classic" QIIME1 mapping file
columns = df.columns.values.tolist()
if 'BarcodeSequence' in columns:
columns.remove('BarcodeSequence')
if 'LinkerPrimerSequence' in columns:
columns.remove('LinkerPrimerSequence')
columns.remove('Description')
sort_columns.extend(columns)
sort_columns.append('Description')
df = df[sort_columns]
qiime_map = join(out_dir, 'qiime-mapping-file.txt')
df.index.name = '#SampleID'
df.to_csv(qiime_map, sep='\t')
return fps, qiime_map, artifact_type
def split_mapping_file(mapping_file, out_dir):
"""Splits a QIIME-compliant mapping file by run_prefix
Parameters
----------
mapping_file : str
The mapping file filepath
out_dir : str
The path to the output directory
Returns
-------
list of str
        The paths to the split mapping files
"""
mf = pd.read_csv(mapping_file, delimiter='\t', dtype=str, encoding='utf-8')
mf.set_index('#SampleID', inplace=True)
path_builder = partial(join, out_dir)
if 'run_prefix' in mf:
if not exists(out_dir):
makedirs(out_dir)
output_fps = []
for prefix, df in mf.groupby('run_prefix'):
out_fp = path_builder('%s_mapping_file.txt' % prefix)
output_fps.append(out_fp)
df.to_csv(out_fp, index_label='#SampleID', sep='\t',
encoding='utf-8')
else:
output_fps = [mapping_file]
return output_fps
def generate_demux_file(sl_out):
"""Creates the HDF5 demultiplexed file
Parameters
----------
sl_out : str
Path to the output directory of split libraries
Returns
-------
str
The path of the demux file
Raises
------
ValueError
If the split libraries output does not contain the demultiplexed fastq
file
"""
fastq_fp = str(join(sl_out, 'seqs.fastq'))
if not exists(fastq_fp):
raise ValueError("The split libraries output directory does not "
"contain the demultiplexed fastq file.")
elif stat(fastq_fp).st_size == 0:
raise ValueError("No sequences were demuxed. Check your parameters.")
demux_fp = join(sl_out, 'seqs.demux')
with File(demux_fp, "w") as f:
to_hdf5(fastq_fp, f)
return demux_fp
def generate_artifact_info(sl_out):
"""Creates the artifact information to attach to the payload
Parameters
----------
sl_out : str
Path to the split libraries output directory
Returns
-------
list of [str, str, list of (str, str)]
The artifacts information to include in the payload when the split
libraries job is completed.
- The command output name
- The artifact type
- The list of filepaths with their artifact type
"""
path_builder = partial(join, sl_out)
filepaths = [(path_builder('seqs.fna'), 'preprocessed_fasta'),
(path_builder('seqs.fastq'), 'preprocessed_fastq'),
(path_builder('seqs.demux'), 'preprocessed_demux'),
(path_builder('split_library_log.txt'), 'log')]
return [ArtifactInfo('demultiplexed', 'Demultiplexed', filepaths)]
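# Hypothetical end-to-end usage of the helpers above (the client object, artifact
# id, and directories are placeholders, not values from a real Qiita deployment):
# fps, qiime_map, artifact_type = get_artifact_information(qclient, '42', out_dir)
# mapping_fps = split_mapping_file(qiime_map, join(out_dir, 'mappings'))
# demux_fp = generate_demux_file(sl_out)
# artifacts = generate_artifact_info(sl_out)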
|
# python argparse11.py 4 2
# python argparse11.py 4 2 -v
# python argparse11.py 4 2 -vv
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("x", type=int, help="the base")
parser.add_argument("y", type=int, help="the exponent")
parser.add_argument("-v", "--verbose", action="count", default=0)
args = parser.parse_args()
answer = args.x ** args.y
if args.verbose >= 2:
print("{} to the power {} equals {}".format(args.x, args.y, answer))
elif args.verbose >= 1:
print("{}^{} == {}".format(args.x, args.y, answer))
else:
print(answer)
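# Expected output for the sample invocations above (with x=4, y=2):
#   python argparse11.py 4 2        -> 16
#   python argparse11.py 4 2 -v     -> 4^2 == 16
#   python argparse11.py 4 2 -vv    -> 4 to the power 2 equals 16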
|
import pytest
from treehugger.s3 import split_s3_url
def test_split_s3_url():
test_s3_url = 's3://bucket/key?versionId=7'
assert split_s3_url(test_s3_url) == ('bucket', 'key', '7')
def test_no_version_split_s3_url():
test_s3_url = 's3://bucket/key'
with pytest.raises(SystemExit):
split_s3_url(test_s3_url)
def test_incorrect_scheme_split_s3_url():
test_s3_url = 'https://bucket/key?versionId=7'
with pytest.raises(SystemExit):
split_s3_url(test_s3_url)
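
# For reference only: a minimal sketch of the behaviour the tests above assume,
# not treehugger's actual implementation of split_s3_url.
from urllib.parse import urlparse, parse_qs

def _split_s3_url_sketch(url):
    """Return (bucket, key, versionId) for an s3:// URL, exiting on bad input."""
    parsed = urlparse(url)
    query = parse_qs(parsed.query)
    if parsed.scheme != 's3' or 'versionId' not in query:
        raise SystemExit(1)
    return parsed.netloc, parsed.path.lstrip('/'), query['versionId'][0]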
|
from sqlalchemy import create_engine, Column, Integer, String, DATETIME, TEXT, and_, or_
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from datetime import datetime
# TODO: db_uri
# dialect+driver://username:password@host:port/database?charset=utf8
DB_URI = 'mysql+pymysql://root:root123@127.0.0.1:3300/first_sqlalchemy?charset=utf8'
engine = create_engine(DB_URI)
Base = declarative_base(bind=engine)
session = sessionmaker(bind=engine)()
class Article(Base):
__tablename__ = 'article'
id = Column(Integer, primary_key=True, autoincrement=True)
title = Column(String(11), nullable=False)
price = Column(Integer, default=0, nullable=False)
create_time = Column(DATETIME, default=datetime.now)
context = Column(TEXT)
def __repr__(self):
return '<Article(title={title}, price={price})>'.format(title=self.title, price=self.price)
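# NOTE: the filter examples below assume the `article` table already exists and
# holds rows with titles such as 'title0', '苹果' and '桃子'. A commented-out
# seeding sketch (placeholder data, not part of the original script):
# Base.metadata.create_all()
# session.add_all([Article(title='title%d' % i, context='桃子') for i in range(3)])
# session.commit()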
# TODO: 1.equals
result1 = session.query(Article).filter(Article.title == 'title0').first()
print(result1)
# TODO: 2.not equals
result2 = session.query(Article).filter(Article.title != 'title0').all()
print(result2)
# TODO: 3. like & ilike (ilike is case-insensitive)
# result3 = session.query(Article).filter(Article.title.like('title%')).all()
result3 = session.query(Article).filter(Article.title.ilike('title%')).all()
print(result3)
# TODO: 4.in
result4 = session.query(Article).filter(Article.title.in_(['title0', '苹果', '桃子'])).all()
print(result4)
# TODO: 5.not in
result5 = session.query(Article).filter(Article.title.notin_(['title0', '苹果', '桃子'])).all()
print(result5)
# TODO: 6.is null
result6 = session.query(Article).filter(Article.create_time == None).all()
print(result6)
# TODO: 7.is not null
result7 = session.query(Article).filter(Article.create_time != None).all()
print(result7)
# TODO: 8.and
# TODO: 8.1 pass multiple conditions as positional arguments
result8 = session.query(Article).filter(Article.title == '桃子', Article.context == '桃子').all()
print(result8)
# TODO: 8.2 use the and_ function
result9 = session.query(Article).filter(and_(Article.title == '桃子', Article.context == '桃子')).all()
print(result9)
# TODO: 9.or
result10 = session.query(Article).filter(or_(Article.title == '桃子', Article.context == '桃子')).all()
print(result10)
"""
SELECT article.id AS article_id, article.title AS article_title, article.price AS article_price, article.create_time AS article_create_time, article.context AS article_context
FROM article
WHERE article.title = %(title_1)s OR article.context = %(context_1)s
"""
|
import os
from setuptools import find_packages, setup
NAME = 'wagtail_gallery'
DESCRIPTION = 'A simple gallery app built for Wagtail CMS and Django'
URL = 'https://gitlab.com/dfmeyer/wagtail_gallery'
EMAIL = 'me3064@gmail.com'
AUTHOR = 'Daniel F. Meyer'
REQUIRES_PYTHON = '>=3.5'
VERSION = '0.1.1'
LICENSE = 'MIT'
KEYWORDS = ['Django', 'Wagtail', 'gallery']
PROJECT_URLS = {
"Bug Tracker": "https://gitlab.com/dfmeyer/wagtail_gallery/issues",
"Documentation": "https://wagtail-gallery.readthedocs.io/en/latest/",
"Source Code": "https://gitlab.com/dfmeyer/wagtail_gallery",
}
REQUIRED = ['wagtail', 'django-social-share', ]
here = os.path.abspath(os.path.dirname(__file__))
try:
with open('README.rst') as f:
LONG_DESCRIPTION = f.read()
except FileNotFoundError:
LONG_DESCRIPTION = DESCRIPTION
setup(
name=NAME,
version=VERSION,
packages=find_packages(),
include_package_data=True,
url=URL,
license=LICENSE,
author=AUTHOR,
author_email=EMAIL,
description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
long_description_content_type='text/x-rst',
python_requires=REQUIRES_PYTHON,
install_requires=REQUIRED,
keywords=KEYWORDS,
project_urls=PROJECT_URLS,
)
|
"""Defines the Riemann language"""
from sympy import Matrix, Symbol
class Conserved (Symbol):
"""Represents a conserved variable
    For multi-dimensional variables, the dimension can be set with fields
    or with dim.
    Given a list of expressions, jacobian will compute the Jacobian of the
    expressions with respect to the fields of the conserved variable.
Examples:
>>> q = Conserved('q', dim=2)
>>> q_0, q_1 = q.fields()
>>> q.jacobian([.5*q_0+q_1, q_1**2])
[0.5, 1]
[ 0, 2*q_1]
>>> nq = Conserved('nq')
>>> u, uh = nq.fields(['u', 'uh'])
>>> nq.jacobian([.5*u+uh, uh**2])
[0.5, 1]
[ 0, 2*uh]
"""
def __new__(cls, name, dim=None):
obj = Symbol.__new__(cls, name)
obj.dim = dim
obj._fields = None
obj._field_names = None
obj._constant_fields = None
return obj
def fields(self, list_of_names=None):
"""Generates and returns the fields inside the conserve fields.
If list of names is given, the fields are named accordingly once
initialized with different names the names field is ignored.
If a dimension wasn't set when the Conserved object was constructed,
the dimension is determined from the number of names. If the number
of names and the dimension are different, an error is thrown.
"""
if self._fields and self._field_names:
return self._fields
if not list_of_names:
list_of_names = ["%s_%d" % (str(self), n) \
for n in xrange(self.dim)]
if self.dim is not None and len(list(list_of_names)) != self.dim:
raise ValueError("Given too many names for dim.")
        self._fields = [Field(name) for name in list_of_names]
self._field_names = list_of_names
return self._fields
def jacobian(self, flux):
"""Returns the jacobian of the flux expression with respect to the
fields in the conserved variable."""
if not self._fields:
raise ValueError("No fields.")
return Matrix(flux).jacobian(self._fields)
class Field (Symbol):
"""Represents a field of the conserved quantity."""
pass
class ConstantField (Symbol):
"""Represents a field of constants defined on cells."""
pass
class Constant (Symbol):
"""Represents a global constant."""
pass
|
from django.contrib.staticfiles.finders import BaseStorageFinder
from .storage import SassFileStorage
class CssFinder(BaseStorageFinder):
"""
Find static *.css files compiled on the fly using templatetag `{% sass_src "" %}`
and stored in configured storage.
"""
storage = SassFileStorage()
def list(self, ignore_patterns):
"""
Do not list the contents of the configured storages, since this has already been done by
other finders.
This prevents the warning ``Found another file with the destination path ...``, while
issuing ``./manage.py collectstatic``.
"""
if False:
yield
|
"""This script analyzes the signal of a sensor device"""
import itertools
import os
from pathlib import Path
from typing import List, Union, Tuple
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.signal import find_peaks
from tqdm import tqdm
DATA_PATH = "DATA.bin"
def plot_signal(
df: pd.DataFrame,
columns: List[str],
filename: Union[str, Path],
heartbeat: bool = True,
) -> None:
"""plot the columns of a dataframe
:param df: dataframe
:param columns: columns to plot
:param filename: filename to save the plot
:param heartbeat: if True, plot the heart_beat column as points
"""
plt.figure().set_size_inches(14, 9)
for col in columns:
plt.plot(df[col])
if heartbeat:
heart_beat = df.heart_beat[df.heart_beat == 1]
plt.plot(heart_beat.index, [0] * len(heart_beat.index), "bo")
columns = columns + ["HeartActivityLabel"]
plt.legend(columns)
plt.savefig(filename, bbox_inches="tight")
plt.close()
def calculate_inter_pearson_correlation(segments: List[pd.DataFrame]) -> pd.DataFrame:
"""This method calculates the correlation between the segments for each column.
:param segments: list of segments
:return: dataframe with the inter-correlation
"""
multi_index = [
(k, s)
for k, s in itertools.product(range(len(segments)), range(len(segments)))
if k < s
]
feature_df = pd.DataFrame(
index=multi_index,
columns=["SideLeft", "SideRight", "FrontLeft", "FrontRight", "Back", "Top"],
dtype=np.float32,
)
for k, k_segment in enumerate(segments):
for s, s_segment in enumerate(segments):
if k < s:
for i, col in enumerate(["SideLeft", "SideRight", "FrontLeft", "FrontRight", "Back", "Top"]):
feature_df.xs((k, s))[f"{col}"] = k_segment[col].corr(
s_segment[col]
)
return feature_df
def calculate_intra_pearson_correlation(segments: List[pd.DataFrame]) -> pd.DataFrame:
"""This method calculates the correlation between the columns.
:param segments: list of segments
:return: dataframe with the correlation between
"""
columns = [
f"{icol}-{jcol}"
for (i, icol), (j, jcol) in (
itertools.product(
enumerate(["SideLeft", "SideRight", "FrontLeft", "FrontRight", "Back", "Top"]),
enumerate(["SideLeft", "SideRight", "FrontLeft", "FrontRight", "Back", "Top"]),
)
)
if i < j
]
feature_df = pd.DataFrame(columns=columns, dtype=np.float32)
for k, segment in enumerate(segments):
for i, icol in enumerate(["SideLeft", "SideRight", "FrontLeft", "FrontRight", "Back", "Top"]):
for j, jcol in enumerate(["SideLeft", "SideRight", "FrontLeft", "FrontRight", "Back", "Top"]):
if i < j:
feature_df.loc[k, f"{icol}-{jcol}"] = segment[icol].corr(
segment[jcol]
)
return feature_df
def align_segments_on_column(
sg_segments: List[pd.DataFrame], align_column: str = "Top", align_ix: int = 0
) -> List[pd.DataFrame]:
"""align segments based on max correlation with the segment with index align_ix. The order is preserved
:param sg_segments: list of segments
:param align_column: column to align on
:param align_ix: index of the segment to align on
:return: list of aligned segments
"""
align_segment = sg_segments[align_ix]
new_segments = []
for k, segment in tqdm(enumerate(sg_segments), total=len(sg_segments)):
if k != align_ix:
cross_corr = [
align_segment[align_column].corr(segment[align_column].shift(i))
for i in range(-250, 250)
]
shift = np.argmax(cross_corr) - 250
new_segments.append(segment.shift(shift))
else:
new_segments.append(segment)
return new_segments
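# Minimal illustration of the shift-and-correlate idea used above (synthetic
# data, purely for clarity; not part of the analysis pipeline):
#   s = pd.Series(np.sin(np.linspace(0, 20, 1000)))
#   lagged = s.shift(37)
#   best_lag = np.argmax([s.corr(lagged.shift(i)) for i in range(-250, 250)]) - 250
#   # best_lag comes out near -37, i.e. shifting `lagged` by -37 realigns it with `s`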
def smoother_segments(
segments: List[pd.DataFrame], rolling_window: int = 20
) -> List[pd.DataFrame]:
"""smooth segments based on rolling window. The columns 'HeartActivityLabel' and 'heart_beat' are removed.
:param segments: list of segments
:param rolling_window: rolling window
:return: list of smoothed segments
"""
new_segments = []
for segment in tqdm(segments, total=len(segments)):
new_segment = segment.rolling(rolling_window).mean()
new_segment = new_segment.drop(columns=["HeartActivityLabel", "heart_beat"])
new_segment.dropna(inplace=True)
new_segment.reset_index(drop=True, inplace=True)
new_segments.append(new_segment)
return new_segments
def align_segments(
segments: List[pd.DataFrame], origin_df: pd.DataFrame, picture_path: Path
) -> Tuple[List[pd.DataFrame], str]:
"""correlation analysis
This method analyzes the correlation between the segments based on different columns.
For each column, the segments are aligned based on the maximal correlation with the first segment.
To measure the success of the alignment, the correlation between the aligned segments is calculated
for each column and aggregated as mean.
Then, the mean of all aggregated means of each column is calculated.
Based on this value, we pick the column which is most promising to align the segments.
Also, it generates pictures of the correlation.
:param segments: list of segments
:param origin_df: original dataframe
:param picture_path: path to save pictures
:return: list of aligned segments and name of the most promising column
"""
print("aligning segments")
print("#################################################################")
if not isinstance(picture_path, Path):
picture_path = Path(picture_path)
os.makedirs(picture_path, exist_ok=True)
# intra pearson correlation
print("intra pearson correlation")
intra_corr_df = calculate_intra_pearson_correlation(segments)
print(intra_corr_df)
print(intra_corr_df.describe())
print("intra hole segment")
intra_corr_hole_segment_df = calculate_intra_pearson_correlation([origin_df])
print(intra_corr_hole_segment_df)
# inter pearson correlation
print("inter pearson correlation")
inter_corr_df = calculate_inter_pearson_correlation(segments)
print(inter_corr_df)
print(inter_corr_df.describe())
print("show the ones which are less correlated")
print(inter_corr_df.Top.abs().sort_values().head(20))
plt.figure().set_size_inches(14, 9)
plt.plot(segments[2].Top)
plt.plot(segments[6].Top)
plt.legend(["segment 2", "segment 6"])
plt.savefig(picture_path / "correlation_segments_Top_2_6.jpg", bbox_inches="tight")
plt.close()
plt.figure().set_size_inches(14, 9)
plt.plot(segments[2].Top)
plt.plot(segments[1].Top)
plt.legend(["segment 2", "segment 1"])
plt.savefig(picture_path / "correlation_segments_Top_1_2.jpg", bbox_inches="tight")
plt.close()
plt.figure().set_size_inches(14, 9)
cross_corr = [
segments[2].Top.corr(segments[6].Top.shift(i)) for i in range(-250, 250)
]
shift = np.argmax(cross_corr) - 250
plt.plot(segments[2].Top)
plt.plot(segments[6].Top)
plt.plot(segments[6].Top.shift(shift))
plt.legend(["segment 2", "segment 6", "shifted segment 6"])
plt.savefig(
picture_path / "correlation_segments_Top_2_6_shifted.jpg", bbox_inches="tight"
)
plt.close()
new_segments_list = align_segments_on_column(segments, "Top")
plt.figure().set_size_inches(14, 9)
plt.plot(new_segments_list[2].Top)
plt.plot(new_segments_list[6].Top)
plt.title("group shifted by Top")
plt.legend(["segment 2", "segment 6"])
plt.savefig(
picture_path / "correlation_segments_Top_2_6_group_shifted.jpg",
bbox_inches="tight",
)
plt.close()
os.makedirs(picture_path / "alignments", exist_ok=True)
# plot all segments in one picture for each column
for col in ["SideLeft", "SideRight", "FrontLeft", "FrontRight", "Back", "Top"]:
plt.figure().set_size_inches(14, 9)
for segment in segments:
plt.plot(segment[col])
plt.savefig(
picture_path / f"alignments/unaligned_segments_{col}.jpg",
bbox_inches="tight",
)
plt.close()
means = {"origin": inter_corr_df.describe().loc["mean"].mean()}
std = {"origin": inter_corr_df.describe().loc["std"].mean()}
for col in ["SideLeft", "SideRight", "FrontLeft", "FrontRight", "Back", "Top"]:
new_segments_list = align_segments_on_column(segments, col)
inter_correlation_df = calculate_inter_pearson_correlation(new_segments_list)
print(f"------------------{col}------------------")
descr = inter_correlation_df.describe()
print(descr)
means[col] = descr.loc["mean"].mean()
std[col] = descr.loc["std"].mean()
# plot all aligned segments in one plot for each column
for col2 in ["SideLeft", "SideRight", "FrontLeft", "FrontRight", "Back", "Top"]:
plt.figure().set_size_inches(14, 9)
for segment in new_segments_list:
plt.plot(segment[col2])
plt.savefig(
picture_path / f"alignments/aligned_segments_{col}_{col2}.jpg",
bbox_inches="tight",
)
plt.close()
print("mean of mean of the correlation between segments for each column")
print(means)
print("mean of std of the correlation between segments for each column")
print(std)
max_column = max(means, key=means.get)
if max_column == "origin":
return segments, max_column
new_segments_list = align_segments_on_column(segments, max_column)
return new_segments_list, max_column
def analyze_heart_beat(df: pd.DataFrame, root_picture_path: Path) -> pd.DataFrame:
"""This method will analyze the heart beats in the signal, generate plots and correct the missing labels.
It will return a copy of the dataframe with the heart beats corrected.
:param df: pandas dataFrame. Must contain the columns "heart_beat" and "HeartActivity"
:param root_picture_path: the path to the root picture path
:return: a copy of the dataframe with the heart beats corrected
"""
new_df = df.copy()
heart_beat = df.heart_beat[df.heart_beat == 1]
plt.figure().set_size_inches(14, 9)
plt.plot(heart_beat.index, df.HeartActivity[heart_beat.index], "bo")
plt.plot(df.HeartActivity)
plt.legend(["HeartActivityLabel", "HeartActivity"])
plt.savefig(root_picture_path / "heart_beat.jpg", bbox_inches="tight")
plt.close()
# heart beat close up
plt.figure().set_size_inches(14, 9)
plt.plot(
heart_beat.loc[250000:300000].index,
df.HeartActivity[heart_beat.index].loc[250000:300000],
"bo",
)
plt.plot(df.HeartActivity.loc[250000:300000])
plt.legend(["HeartActivityLabel", "HeartActivity"])
plt.savefig(root_picture_path / "heart_beat_close_up.jpg", bbox_inches="tight")
plt.close()
# missing meta points
plt.figure().set_size_inches(14, 9)
plt.plot(
heart_beat.loc[100000:135000].index,
df.HeartActivity[heart_beat.index].loc[100000:135000],
"bo",
)
plt.plot(df.HeartActivity.loc[100000:135000])
plt.legend(["HeartActivityLabel", "HeartActivity"])
plt.savefig(
root_picture_path / "heart_beats_close_up_without_labels.jpg",
bbox_inches="tight",
)
plt.close()
# correct missing heart beats labels
peaks, _ = find_peaks(df.HeartActivity.loc[100000:135000], prominence=0.03)
plt.figure().set_size_inches(14, 9)
plt.plot(
heart_beat.loc[100000:135000].index,
df.HeartActivity[heart_beat.index].loc[100000:135000],
"bo",
)
plt.plot(df.HeartActivity.loc[100000:135000])
plt.plot(peaks + 100000, df.HeartActivity.loc[100000:135000][peaks + 100000], "rx")
plt.legend(["HeartActivityLabel", "HeartActivity", "corrected HeartActivityLabel"])
plt.savefig(
root_picture_path / "heart_beats_close_up_with_corrected_labels.jpg",
bbox_inches="tight",
)
plt.close()
peaks, _ = find_peaks(df.HeartActivity, prominence=0.03)
missing_peaks = np.array(
[
peak
for peak in peaks
if not (df.heart_beat[peak - 500: peak + 500] != 0).any()
]
)
new_df.loc[missing_peaks, "heart_beat"] = 1
heart_beat = new_df.heart_beat[new_df.heart_beat == 1]
# heart beat
plt.figure().set_size_inches(14, 9)
plt.plot(heart_beat.index, new_df.HeartActivity[heart_beat.index], "bo")
plt.plot(new_df.HeartActivity)
plt.legend(["HeartActivityLabel", "HeartActivity"])
plt.savefig(
root_picture_path / "heart_beats_with_corrected_labels.jpg", bbox_inches="tight"
)
plt.close()
return new_df
def find_peak(
stream: pd.Series,
index: int,
prominence: float = 0.03,
distance: int = 200,
up: bool = True,
) -> int:
"""This method will find the peak in the stream, if it exists.
:param stream: to be searched
:param index: index of the heart beat to search around
:param prominence: prominence of the peak
:param distance: maximal distance between the heart beat and the peak
:param up: if True, peaks pointing up will be found, otherwise down
"""
if not up:
stream = -stream
peaks, properties = find_peaks(
stream.loc[index - distance: index + distance], prominence=prominence
)
peak = peaks[np.argmax(properties["prominences"])]
return peak + index - distance
def align_columns(df: pd.DataFrame, root_picture_path: Path) -> pd.DataFrame:
"""This method search for the Top peak in the segment 192000:194000 close to the heartbeat
and will shift the data stream Top accordingly.
It will return a copy of the data frame with the Top shifted.
Also, it generates plots of segments of the data stream and the shifted data stream.
:param df: the original data frame. Must contain the columns "SideLeft", "SideRight", "Top", "FrontLeft", "FrontRight", "Back", "heart_beat"
:param root_picture_path: the path to the root picture path
:return: a copy of the data frame with the Top column shifted
"""
heart_beat = df.heart_beat[df.heart_beat == 1]
peak_cor = find_peak(
df.Top, heart_beat.loc[192000:194000].index[0], distance=100, prominence=0.0001
)
cor_shift = heart_beat.loc[192000:194000].index[0] - peak_cor
shifted_df = df.copy()
shifted_df.Top = shifted_df.Top.shift(cor_shift)
plt.figure().set_size_inches(14, 9)
plt.plot(df.SideLeft[192000:194000])
plt.plot(df.SideRight[192000:194000])
plt.plot(df.Top[192000:194000])
plt.plot(df.FrontLeft[192000:194000])
plt.plot(df.FrontRight[192000:194000])
plt.plot(df.Back[192000:194000])
plt.plot(
heart_beat.loc[192000:194000].index,
[0] * len(heart_beat.loc[192000:194000].index),
"bo",
)
plt.plot(peak_cor, df.Top[peak_cor], "rx")
plt.legend(["SideLeft", "SideRight", "Top", "FrontLeft", "FrontRight", "Back", "HeartActivityLabel", "Top_peak"])
plt.savefig(
root_picture_path / f"segment_{192000}_{194000}_cor_peak.jpg",
bbox_inches="tight",
)
plt.close()
for i, k in [
(192000, 194000),
(195000, 197000),
(197500, 199500),
(206500, 208500),
]:
plot_signal(
df[i:k],
["SideLeft", "SideRight", "Top", "FrontLeft", "FrontRight", "Back"],
root_picture_path / f"segment_{i}_{k}.jpg",
heartbeat=True,
)
plot_signal(
shifted_df[i:k],
["SideLeft", "SideRight", "Top", "FrontLeft", "FrontRight", "Back"],
root_picture_path / f"segment_{i}_{k}_cor_shifted.jpg",
heartbeat=True,
)
return shifted_df
def segment_signal(df: pd.DataFrame, overlap: int = 500) -> List[pd.DataFrame]:
"""This method will segment the data frame into smaller ones.
:param df: the data frame to segment
:param overlap: the overlap between two segments
:return: a list of data frames
"""
heart_beat = df.heart_beat[df.heart_beat == 1]
segment_indices = heart_beat.index.to_numpy()
segments = []
for i in range(len(segment_indices) - 1):
new_segment = df.loc[
segment_indices[i] - overlap: segment_indices[i + 1] + overlap - 1
]
new_segment.reset_index(drop=True, inplace=True)
segments.append(new_segment)
return segments
def load_data(data_path: str) -> pd.DataFrame:
"""This method loads the data from the data path and returns a data frame.
:param data_path: the path to the data
:return: the data frame
"""
dt = np.dtype(
[
("SideLeft", np.float32),
("FrontLeft", np.float32),
("SideRight", np.float32),
("FrontRight", np.float32),
("Back", np.float32),
("Top", np.float32),
("SoundPressureLevel", np.float32),
("HeartActivity", np.float32),
("HeartActivityLabel", np.float32),
]
)
data = np.fromfile(data_path, dtype=dt)
data_df = pd.DataFrame(data)
return data_df
if __name__ == "__main__":
# Adjusting the default pandas settings
pd.set_option("max_columns", 15)
pd.set_option("expand_frame_repr", False)
pd.set_option("display.precision", 9)
root_picture_path = Path("pictures")
os.makedirs(root_picture_path, exist_ok=True)
data_df = load_data(DATA_PATH)
print(data_df.head(10))
print(data_df.describe())
data_df["heart_beat"] = data_df.HeartActivityLabel.apply(lambda x: int(bool(x)))
data_df = analyze_heart_beat(data_df, root_picture_path)
data_aligned_df = align_columns(data_df, root_picture_path)
plot_signal(data_aligned_df, ["SideLeft", "SideRight", "Top"], root_picture_path / "hole_data.jpg")
plot_signal(data_aligned_df, ["SoundPressureLevel"], root_picture_path / "SoundPressureLevel.jpg")
plot_signal(
data_aligned_df.loc[190000:250000],
["SideLeft", "SideRight"],
root_picture_path / "segment_190000_250000.jpg"
)
short_df = data_aligned_df.loc[190000:250000]
short_df.reset_index(drop=True, inplace=True)
short_origin_df = data_df.loc[190000:250000]
short_origin_df.reset_index(drop=True, inplace=True)
data_segments = segment_signal(short_df, overlap=0)
data_origin_segments = segment_signal(short_origin_df, overlap=0)
os.makedirs(root_picture_path / "segments", exist_ok=True)
for k, segment in enumerate(data_segments):
plot_signal(segment, ["SideLeft", "SideRight"], root_picture_path / "segments" / f"segment_{k}.jpg")
os.makedirs(root_picture_path / "segments_allcol", exist_ok=True)
for k, segment in enumerate(data_segments):
plot_signal(segment, ["SideLeft", "SideRight", "Top", "FrontLeft", "SideRight", "Back"], root_picture_path / "segments_allcol" / f"segment_{k}.jpg")
print("cor not shifted")
aligned_origin_segments, _ = align_segments(data_origin_segments, short_origin_df, root_picture_path / "cor_not_shifted")
print("cor shifted")
aligned_segments, _ = align_segments(
data_segments, short_df, root_picture_path / "correlation_analyzes"
)
    # smooth the segments and apply align_segments; compare the results with the unsmoothed segments.
    # This is only for analysis (and possibly alignment) of the segments, not for further processing.
smooth_segments = smoother_segments(data_segments, rolling_window=20)
aligned_smooth_segments, _ = align_segments(
smooth_segments, short_df, root_picture_path / "correlation_smooth_analyzes"
)
|
#coding:utf8
import logging
logging.basicConfig(level=logging.DEBUG)
_logger = logging.getLogger(__name__)
#exception
from exceptions import ApplicationException
from django.http import Http404
from django.http import HttpResponseBadRequest
#crypt
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from itsdangerous import SignatureExpired
import base64
import json
import traceback
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.padding import PKCS7
#common
import time
from pprint import pprint
#from flask import request
import urllib2
import requests
#database
#from flask_wechat_utils import config as config_common
from models import WXUser as User
import config as config_common
#error
from exceptions import ERROR_TOKEN_MISSING
from exceptions import ERROR_TOKEN_WRONG_DECRYPT
from exceptions import ERROR_TOKEN_TIMEOUT
from exceptions import ERROR_TOKEN_WRONG_FIELDS
from exceptions import ERROR_TOKEN_WRONG_NO_USER
from exceptions import ERROR_CODE_WRONG
from exceptions import ERROR_IV_ENCRYPTED_WRONG
from exceptions import ERROR_MONGO_GET_USER_WRONG
from exceptions import ERROR_TOKEN_WRONG_ENCRYPT
from exceptions import ERROR_CONTENT_TYPE_NOT_JSON
from exceptions import ERROR_LOGIN_CODE_MISSING
from exceptions import ERROR_LOGIN_CODE_FREQUENCY_LIMIT
from exceptions import ERROR_LOGIN_CODE_WEIXIN_BUSY
from exceptions import ERROR_LOGIN_CODE_LOST_EFFECT
from exceptions import ERROR_LOGIN_CODE_NO_WHY
from exceptions import ERROR_LOGIN_MONGO_CREATE_FAIL
from exceptions import ERROR_LOGIN_MONGO_UPDATE_FAIL
from exceptions import ERROR_REGISTER_MISSING_IV_OR_ENCRYPTED
from exceptions import ERROR_REGISTER_MISSING_TOKEN
from exceptions import ERROR_REGISTER_NO_USER
from exceptions import ERROR_MISSING_WXAPP_ID
from exceptions import ERROR_MISSING_WXAPP_SECRET
from exceptions import ERROR_CONFIG_WEB_NAME_WRONG
from exceptions import ERROR_CONFIG_TOKEN_SECRET_KEY_WRONG
from exceptions import ERROR_CONFIG_TOKEN_TIMEOUT_HOURS_WRONG
from exceptions import ERROR_CONFIG_TOKEN_SALT_WRONG
from exceptions import ERROR_CONFIG_TOKEN_FIELDS_REQUIRED_WRONG
from exceptions import ERROR_CONFIG_TOKEN_HEADER_FIELD_WRONG
from exceptions import ERROR_CONFIG_LOGIN_CODE_FIELD_NAME_WRONG
from exceptions import ERROR_CONFIG_UPDATE_IV_FIELD_NAME_WRONG
from exceptions import ERROR_CONFIG_UPDATE_ENCRYPTEDDATA_FIELD_NAME_WRONG
#----------------------------------
# the applications configurations
#----------------------------------
def get_appid():
#return config_common.WXAPP_ID
if not config_common.WXAPP_ID:
raise ApplicationException(
errcode=ERROR_MISSING_WXAPP_ID,
)
return config_common.WXAPP_ID
def get_appsecret():
if not config_common.WXAPP_SECRET:
raise ApplicationException(
errcode=ERROR_MISSING_WXAPP_SECRET,
)
return config_common.WXAPP_SECRET
def get_web_name():
if not isinstance(config_common.WEB_NAME, str):
raise ApplicationException(
errcode=ERROR_CONFIG_WEB_NAME_WRONG,
)
return config_common.WEB_NAME
def get_token_secret_key():
if not isinstance(config_common.TOKEN_SECRET_KEY, str):
raise ApplicationException(
errcode=ERROR_CONFIG_TOKEN_SECRET_KEY_WRONG,
)
return config_common.TOKEN_SECRET_KEY
def get_token_timeout_hours():
if not isinstance(config_common.TOKEN_TIMEOUT_HOURS, int):
raise ApplicationException(
errcode=ERROR_CONFIG_TOKEN_TIMEOUT_HOURS_WRONG,
)
return config_common.TOKEN_TIMEOUT_HOURS
def get_token_salt():
if not isinstance(config_common.TOKEN_SALT, str):
raise ApplicationException(
errcode=ERROR_CONFIG_TOKEN_SALT_WRONG,
)
return config_common.TOKEN_SALT
def get_token_fields_required():
if not isinstance(config_common.TOKEN_FIELDS_REQUIRED, list):
raise ApplicationException(
errcode=ERROR_CONFIG_TOKEN_FIELDS_REQUIRED_WRONG,
)
return config_common.TOKEN_FIELDS_REQUIRED
def get_token_header_field():
if not isinstance(config_common.TOKEN_HEADER_FIELD, str):
raise ApplicationException(
errcode=ERROR_CONFIG_TOKEN_HEADER_FIELD_WRONG,
)
return config_common.TOKEN_HEADER_FIELD
def get_login_code_field_name():
if not isinstance(config_common.LOGIN_CODE_FIELD_NAME, str):
raise ApplicationException(
errcode=ERROR_CONFIG_LOGIN_CODE_FIELD_NAME_WRONG,
)
return config_common.LOGIN_CODE_FIELD_NAME
def get_update_iv_field_name():
if not isinstance(config_common.UPDATE_IV_FIELD_NAME, str):
raise ApplicationException(
errcode=ERROR_CONFIG_UPDATE_IV_FIELD_NAME_WRONG,
)
return config_common.UPDATE_IV_FIELD_NAME
def get_update_encryptedData_field_name():
if not isinstance(config_common.UPDATE_ENCRYPTEDDATA_FIELD_NAME, str):
raise ApplicationException(
errcode=ERROR_CONFIG_UPDATE_ENCRYPTEDDATA_FIELD_NAME_WRONG,
)
return config_common.UPDATE_ENCRYPTEDDATA_FIELD_NAME
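# For reference, the attributes the getters above expect on the local `config`
# module (names taken from the checks above; values are deployment-specific):
#   WXAPP_ID, WXAPP_SECRET        - WeChat mini-program credentials
#   WEB_NAME                      - str
#   TOKEN_SECRET_KEY, TOKEN_SALT  - str, fed to itsdangerous
#   TOKEN_TIMEOUT_HOURS           - int
#   TOKEN_FIELDS_REQUIRED         - list of user fields embedded in the token
#   TOKEN_HEADER_FIELD            - str, name of the HTTP header carrying the token
#   LOGIN_CODE_FIELD_NAME         - str
#   UPDATE_IV_FIELD_NAME, UPDATE_ENCRYPTEDDATA_FIELD_NAME - str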
#----------------------------------
# the databases configurations
#----------------------------------
def get_wechat_user_info_from_database(token):
result_token = decrypt_token(token)
for attribute in get_token_fields_required():
if not result_token.get(attribute):
raise ApplicationException(
errcode=ERROR_TOKEN_WRONG_FIELDS,
)
try:
user = User.objects.get(openid=result_token['openid'])
except:
raise ApplicationException(
errcode=ERROR_TOKEN_WRONG_NO_USER,
)
if user:
return user
else:
raise ApplicationException(
errcode=ERROR_TOKEN_WRONG_NO_USER,
)
#----------------------------------
# the datetime configurations
#----------------------------------
def now_ts():
return int(time.time())
#----------------------------------
# the common functions
#----------------------------------
def get_session_key_from_weixin(appid,appsecret,js_code):
weixin_api = 'https://api.weixin.qq.com/sns/jscode2session'
grant_type = 'authorization_code'
url = '{}?appid={}&secret={}&js_code={}&grant_type={}'.format(
weixin_api,
appid,
appsecret,
js_code,
grant_type,
)
return requests.get(url).json()
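# For reference: on success the jscode2session endpoint returns JSON containing
# at least 'openid' and 'session_key' (and often 'unionid'); on failure it returns
# 'errcode'/'errmsg' instead, which login() below treats as a login error.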
def decrypt_token(token):
cryption = Serializer(
secret_key=get_token_secret_key(),
salt=get_token_salt(),
expires_in=get_token_timeout_hours() * 3600
)
try:
result_token = cryption.loads(token)
return result_token
except SignatureExpired:
raise ApplicationException(
errcode=ERROR_TOKEN_TIMEOUT,
)
except:
raise ApplicationException(
errcode=ERROR_TOKEN_WRONG_DECRYPT,
)
def decrypt(session_key, iv, encrypted):
try:
key = base64.b64decode(session_key)
iv = base64.b64decode(iv)
cipher = Cipher(
algorithms.AES(key),
modes.CBC(iv),
backend=default_backend(),
)
decryptor = cipher.decryptor()
plain = decryptor.update(base64.b64decode(encrypted)) + decryptor.finalize()
unpadder = PKCS7(128).unpadder()
decrypted = unpadder.update(plain)
decrypted += unpadder.finalize()
decrypted = json.loads(decrypted.decode('utf8'))
    except ApplicationException:
raise
except:
raise ApplicationException(
errcode=ERROR_IV_ENCRYPTED_WRONG,
)
return decrypted
def encrypt_token(userinfo):
cryption = Serializer(
secret_key=get_token_secret_key(),
salt=get_token_salt(),
expires_in=get_token_timeout_hours() * 3600
)
try:
result_token = cryption.dumps(userinfo)
return result_token
except:
raise ApplicationException(
errcode=ERROR_TOKEN_WRONG_ENCRYPT,
)
#----------------------------------
# the apis
#----------------------------------
def auth(func):
def wrapper(self, request, *args, **kwargs):
        _logger.debug('-----------authenticating user-------------')
token = request.environ.get('HTTP_{}'.format(get_token_header_field().upper()))
content_type = request.environ.get('CONTENT_TYPE')
_logger.debug({'content_type':content_type,'token':token})
if not content_type or not content_type.lower() == 'application/json':
raise ApplicationException(
errcode=ERROR_CONTENT_TYPE_NOT_JSON,
)
if not token:
raise ApplicationException(
errcode=ERROR_TOKEN_MISSING,
)
else:
request.wechat_user = get_wechat_user_info_from_database(token)
return func(self, request, *args, **kwargs)
return wrapper
def login(func):
def wrapper(self, request, *args, **kwargs):
_logger.debug(request.__dict__)
        _logger.debug('-----------user logging in------------')
content_type = request.environ.get('CONTENT_TYPE')
if not content_type or not content_type.lower() == 'application/json':
raise ApplicationException(
errcode=ERROR_CONTENT_TYPE_NOT_JSON,
)
code_field_name = get_login_code_field_name()
#params = request.form
#params = request.json # flask-wechat-utils
#params = json.loads(request.body) # django
params = request.data # rest framework
_logger.debug(params)
code = params.get(code_field_name)
_logger.debug({'content_type':content_type,'code':code})
if not code:
raise ApplicationException(
errcode=ERROR_LOGIN_CODE_MISSING,
)
result_login = get_session_key_from_weixin(
appid=get_appid(),
appsecret=get_appsecret(),
js_code=params.get(code_field_name)
)
#success
if result_login.get('session_key') and result_login.get('openid'):
try:
user = User.objects.get(openid=result_login['openid'])
except:
user = None
#create
if not user:
try:
User.objects.create(
session_key=result_login['session_key'],
openid=result_login['openid'],
last_login_ts=now_ts()
).save()
except:
raise ApplicationException(
errcode=ERROR_LOGIN_MONGO_CREATE_FAIL,
)
#update
else:
try:
user.session_key=result_login.get('session_key')
user.last_login_ts=now_ts()
user.last_ping_ts=now_ts()
user.save()
except:
raise ApplicationException(
errcode=ERROR_LOGIN_MONGO_UPDATE_FAIL,
)
user = User.objects.get(openid=result_login['openid'])
userinfo = {}
for key in get_token_fields_required():
value = getattr(user,key)
userinfo[key] = value
request.wechat_user_token = encrypt_token(userinfo)
request.wechat_user = user
# #fail
# elif result_login.get('errcode') == 45011:
# raise ApplicationException(
# errcode=ERROR_LOGIN_CODE_FREQUENCY_LIMIT,
# )
# elif result_login.get('errcode') == -1:
# raise ApplicationException(
# errcode=ERROR_LOGIN_CODE_WEIXIN_BUSY,
# )
# elif result_login.get('errcode') == 40029:
# raise ApplicationException(
# errcode=ERROR_LOGIN_CODE_LOST_EFFECT,
# )
else:
_logger.debug(result_login)
raise ApplicationException(
errcode=ERROR_LOGIN_CODE_NO_WHY,
)
return func(self, request, *args, **kwargs)
return wrapper
def register(func):
def wrapper(self, request, *args, **kwargs):
_logger.debug('-----------user registering-----------')
token = request.environ.get('HTTP_{}'.format(get_token_header_field().upper()))
content_type = request.environ.get('CONTENT_TYPE')
_logger.debug({'content_type':content_type,'token':token})
if not content_type or not content_type.lower() == 'application/json':
raise ApplicationException(
errcode=ERROR_CONTENT_TYPE_NOT_JSON,
)
if not token:
raise ApplicationException(
errcode=ERROR_REGISTER_MISSING_TOKEN,
)
#params = request.form
#params = request.json # flask-wechat-utils
params = request.data # rest framework
_logger.debug(params)
encryptedData_field_name = get_update_encryptedData_field_name()
iv_field_name = get_update_iv_field_name()
if not params.get(iv_field_name) or not params.get(encryptedData_field_name):
raise ApplicationException(
errcode=ERROR_REGISTER_MISSING_IV_OR_ENCRYPTED,
)
iv = params.get(iv_field_name)
encryptedData = params.get(encryptedData_field_name)
result_token = decrypt_token(token)
openid = result_token['openid']
try:
user = User.objects.get(openid=openid)
except Exception:
raise ApplicationException(
errcode=ERROR_REGISTER_MISSING_IV_OR_ENCRYPTED,
)
session_key = user.session_key
#session_key = 'HKUOhvaNYvMxGCt2BpjfJg=='
result_userinfo = decrypt(
session_key = session_key,
iv=iv,
encrypted=encryptedData
)
mobile = '' if result_userinfo.get('mobile') is None else result_userinfo.get('mobile')
nickName = '' if result_userinfo.get('nickName') is None else result_userinfo.get('nickName')
avatarUrl = '' if result_userinfo.get('avatarUrl') is None else result_userinfo.get('avatarUrl')
gender = '' if result_userinfo.get('gender') is None else result_userinfo.get('gender')
city = '' if result_userinfo.get('city') is None else result_userinfo.get('city')
province = '' if result_userinfo.get('province') is None else result_userinfo.get('province')
country = '' if result_userinfo.get('country') is None else result_userinfo.get('country')
language = '' if result_userinfo.get('language') is None else result_userinfo.get('language')
user.session_key=session_key
user.nickname=nickName
user.avatar=avatarUrl
user.gender=gender
user.city=city
user.province=province
user.country=country
user.language=language
user.mobile=mobile
user.last_login_ts=now_ts()
user.last_ping_ts=now_ts()
user.save()
user = User.objects.get(openid=openid)
request.wechat_user = user
return func(self, request, *args, **kwargs)
return wrapper
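# Hypothetical usage sketch (view names are illustrative, not part of this module):
# the decorators above are written for method-based views that receive
# (self, request, ...), e.g. Django REST framework APIViews.
#
#   from rest_framework.views import APIView
#   from rest_framework.response import Response
#
#   class WeChatLoginView(APIView):
#       @login
#       def post(self, request, *args, **kwargs):
#           # login() attaches request.wechat_user and request.wechat_user_token
#           return Response({'token': request.wechat_user_token})
#
#   class WeChatProfileView(APIView):
#       @auth
#       def get(self, request, *args, **kwargs):
#           # auth() resolves the token header into request.wechat_user
#           return Response({'openid': request.wechat_user.openid})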
|
# Copyright (c) 2016-2021, Thomas Larsson
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those
# of the authors and should not be interpreted as representing official policies,
# either expressed or implied, of the FreeBSD Project.
import bpy
from .settings import GS, LS
from . import globvars as G
def clearErrorMessage():
G.theMessage = ""
G.theErrorLines = []
class ErrorOperator(bpy.types.Operator):
bl_idname = "daz.error"
bl_label = "Daz Importer"
def execute(self, context):
return {'RUNNING_MODAL'}
def invoke(self, context, event):
G.theErrorLines = G.theMessage.split('\n')
maxlen = len(self.bl_label)
for line in G.theErrorLines:
if len(line) > maxlen:
maxlen = len(line)
width = 20+5*maxlen
height = 20+5*len(G.theErrorLines)
#self.report({'INFO'}, G.theMessage)
wm = context.window_manager
return wm.invoke_props_dialog(self, width=width)
def draw(self, context):
for line in G.theErrorLines:
self.layout.label(text=line)
def invokeErrorMessage(value, warning=False):
if warning:
G.theMessage = "WARNING:\n" + value
else:
G.theMessage = "ERROR:\n" + value
if G.theSilentMode:
print(G.theMessage)
else:
bpy.ops.daz.error('INVOKE_DEFAULT')
class DazError(Exception):
def __init__(self, value, warning=False):
invokeErrorMessage(value, warning)
def __str__(self):
return repr(G.theMessage)
def reportError(msg, instances={}, warnPaths=False, trigger=(2,3), force=False):
global theUseDumpErrors
trigWarning,trigError = trigger
if GS.verbosity >= trigWarning or force:
print(msg)
if GS.verbosity >= trigError or force:
theUseDumpErrors = True
if warnPaths:
msg += ("\nHave all DAZ library paths been set up correctly?\n" +
"See https://diffeomorphic.blogspot.se/p/setting-up-daz-library-paths.html ")
msg += ("\nFor details see\n'%s'" % getErrorPath())
raise DazError(msg)
return None
def getErrorPath():
import os
return os.path.realpath(os.path.expanduser(GS.errorPath))
def handleDazError(context, warning=False, dump=False):
global theUseDumpErrors
if not (dump or theUseDumpErrors):
return
theUseDumpErrors = False
filepath = getErrorPath()
try:
fp = open(filepath, "w", encoding="utf_8")
except Exception:
print("Could not write to %s" % filepath)
return
fp.write(G.theMessage)
try:
if warning:
string = getMissingAssets()
fp.write(string)
print(string)
else:
printTraceBack(context, fp)
except Exception:
pass
finally:
fp.write("\n")
fp.close()
print(G.theMessage)
LS.reset()
def dumpErrors(context):
filepath = getErrorPath()
with open(filepath, "w") as fp:
printTraceBack(context, fp)
def getMissingAssets():
if not LS.missingAssets:
return ""
string = "\nMISSING ASSETS:\n"
for ref in LS.missingAssets.keys():
string += (" %s\n" % ref)
return string
def printTraceBack(context, fp):
import sys, traceback
type,value,tb = sys.exc_info()
fp.write("\n\nTRACEBACK:\n")
traceback.print_tb(tb, 30, fp)
from .node import Node
fp.write("\n\nFILES VISITED:\n")
for string in G.theTrace:
fp.write(" %s\n" % string)
fp.write("\nASSETS:")
refs = list(G.theAssets.keys())
refs.sort()
for ref in refs:
asset = G.theAssets[ref]
asset.errorWrite(ref, fp)
fp.write("\n\nOTHER ASSETS:\n")
refs = list(G.theOtherAssets.keys())
refs.sort()
for ref in refs:
fp.write('"%s"\n %s\n\n' % (ref, G.theOtherAssets[ref]))
fp.write("\nDAZ ROOT PATHS:\n")
for n, path in enumerate(G.theDazPaths):
fp.write('%d: "%s"\n' % (n, path))
string = getMissingAssets()
fp.write(string)
fp.write("\nSETTINGS:\n")
settings = []
scn = bpy.context.scene
for attr in dir(scn):
if attr[0:3] == "Daz" and hasattr(scn, attr):
value = getattr(scn, attr)
if (isinstance(value, int) or
isinstance(value, float) or
isinstance(value, str) or
isinstance(value, bool)):
settings.append((attr, value))
settings.sort()
for attr,value in settings:
if isinstance(value, str):
value = ('"%s"' % value)
fp.write('%25s: %s\n' % (attr, value))
theUseDumpErrors = False
#-------------------------------------------------------------
# Execute
#-------------------------------------------------------------
class DazOperator(bpy.types.Operator):
def execute(self, context):
self.prequel(context)
try:
self.run(context)
except DazError:
handleDazError(context)
except KeyboardInterrupt:
G.theMessage = "Keyboard interrupt"
bpy.ops.daz.error('INVOKE_DEFAULT')
finally:
self.sequel(context)
G.theFilePaths = ""
return {'FINISHED'}
def prequel(self, context):
self.storeState(context)
clearErrorMessage()
def sequel(self, context):
wm = bpy.context.window_manager
wm.progress_update(100)
wm.progress_end()
self.restoreState(context)
def storeState(self, context):
from .utils import getSelectedObjects
self.mode = None
self.activeObject = context.object
self.selectedObjects = [ob.name for ob in getSelectedObjects(context)]
if context.object:
self.mode = context.object.mode
try:
bpy.ops.object.mode_set(mode='OBJECT')
except RuntimeError:
pass
def restoreState(self, context):
from .utils import setActiveObject
try:
if self.activeObject:
setActiveObject(context, self.activeObject)
for obname in self.selectedObjects:
if obname in bpy.data.objects.keys():
bpy.data.objects[obname].select_set(True)
if self.mode:
bpy.ops.object.mode_set(mode=self.mode)
except RuntimeError:
pass
class DazPropsOperator(DazOperator):
dialogWidth = 300
def invoke(self, context, event):
wm = context.window_manager
return wm.invoke_props_dialog(self, width=self.dialogWidth)
class IsObject:
@classmethod
def poll(self, context):
return context.object
class IsMesh:
@classmethod
def poll(self, context):
return (context.object and context.object.type == 'MESH')
class IsArmature:
@classmethod
def poll(self, context):
return (context.object and context.object.type == 'ARMATURE')
class IsMeshArmature:
@classmethod
def poll(self, context):
return (context.object and context.object.type in ['MESH', 'ARMATURE'])
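# Hypothetical usage sketch (operator name is illustrative): concrete operators
# derive from DazOperator, implement run(context), and raise DazError on failure;
# execute() above then routes the error through handleDazError.
#
#   class DAZ_OT_example(DazOperator, IsMesh):
#       bl_idname = "daz.example_op"
#       bl_label = "Example Operator"
#
#       def run(self, context):
#           if not context.object.data.polygons:
#               raise DazError("Mesh has no faces")
#           print("Processed %s" % context.object.name)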
|
#!/usr/bin/env python2.7
import flask
import time
from flask import Flask, request, jsonify, Response
from apiJson import getJsonValues, getJsonValuesFromSingleProcesser
from multiprocessing import Pool
app = Flask(__name__)
_pool = None
def matrix_function(content):
response = getJsonValuesFromSingleProcesser(content)
return response
@app.route('/matrix/pp', methods=['POST'])
def route_expcalc():
content = request.get_json()
f = _pool.apply_async(matrix_function,[content])
response = f.get(timeout=2)
return Response(response, mimetype="application/json")
if __name__=='__main__':
_pool = Pool(processes=4)
try:
# insert production server deployment code
app.run()
except KeyboardInterrupt:
_pool.close()
_pool.join()
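# Hypothetical client call (assumes the server above is running on the default
# Flask port; the payload shape depends on getJsonValuesFromSingleProcesser):
#
#   import requests
#   r = requests.post('http://127.0.0.1:5000/matrix/pp', json={'values': [1, 2, 3]})
#   print(r.status_code, r.json())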
|
# coding: utf-8
import json
from django.conf import settings
from django.core import serializers
from django.db import models
from django.db.models.signals import post_save
from django.contrib.auth.models import User
from datetime import datetime
class UserProfile(models.Model):
# This field is required.
user = models.OneToOneField(User, related_name=u'profile')
# Other fields here
# …
@property
def email(self):
return self.user.email
@property
def is_active(self):
return self.user.is_active
def __unicode__(self):
return u", ".join(filter(None, [ unicode(self.user),
self.user.email, ]))
# automatically adding UserProfile to User-instance, if created
def create_user_profile(sender, instance, created, **kwargs):
if created:
UserProfile.objects.create(user=instance)
post_save.connect(create_user_profile, sender=User)
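# Hypothetical usage sketch: the post_save hook above creates the profile, which is
# then reachable through the related_name declared on the OneToOneField.
#
#   user = User.objects.create_user(u'alice', u'alice@example.com', u'secret')
#   print(user.profile.email, user.profile.is_active)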
|
"""This module contains the general information for SystemIOController ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class SystemIOControllerConsts:
pass
class SystemIOController(ManagedObject):
"""This is SystemIOController class."""
consts = SystemIOControllerConsts()
naming_props = set([u'id'])
mo_meta = {
"classic": MoMeta("SystemIOController", "systemIOController", "sioc-[id]", VersionMeta.Version202c, "OutputOnly", 0xf, [], ["read-only"], [u'computeRackUnit'], [u'firmwareBootDefinition', u'firmwareRunning', u'firmwareUpdatable'], ["Get"]),
}
prop_meta = {
"classic": {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version202c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []),
"id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, None, None, None, None, [], ["0-999"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version202c, MoPropertyMeta.READ_ONLY, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
},
}
prop_map = {
"classic": {
"childAction": "child_action",
"description": "description",
"dn": "dn",
"id": "id",
"rn": "rn",
"status": "status",
},
}
def __init__(self, parent_mo_or_dn, id, **kwargs):
self._dirty_mask = 0
self.id = id
self.child_action = None
self.description = None
self.status = None
ManagedObject.__init__(self, "SystemIOController", parent_mo_or_dn, **kwargs)
|
import re
def striptags(text):
return re.compile(r'<[^>]*>').sub('', text)
def strip_toplevel_anchors(text):
return re.compile(r'\.html#.*-toplevel').sub('.html', text)
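# Example usage (illustrative inputs):
#
#   striptags('<p>Hello <b>world</b></p>')               # -> 'Hello world'
#   strip_toplevel_anchors('guide.html#intro-toplevel')  # -> 'guide.html'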
|
# Copyright: (c) OpenSpug Organization. https://github.com/openspug/spug
# Copyright: (c) <spug.dev@gmail.com>
# Released under the AGPL-3.0 License.
import json
from .utils import AttrDict
# Custom exception raised when parsing fails
class ParseError(BaseException):
def __init__(self, message):
self.message = message
# Argument definition for a parameter that needs validation
class Argument(object):
"""
:param name: name of option
:param default: default value if the argument is absent
:param bool required: is required
"""
def __init__(self, name, default=None, handler=None, required=True, type=str, filter=None, help=None,
nullable=False):
self.name = name
self.default = default
self.type = type
self.required = required
self.nullable = nullable
self.filter = filter
self.help = help
self.handler = handler
if not isinstance(self.name, str):
raise TypeError('Argument name must be string')
if filter and not callable(self.filter):
raise TypeError('Argument filter is not callable')
def parse(self, has_key, value):
if not has_key:
if self.required and self.default is None:
raise ParseError(
self.help or 'Required Error: %s is required' % self.name)
else:
return self.default
elif value in [u'', '', None]:
if self.default is not None:
return self.default
elif not self.nullable and self.required:
raise ParseError(
self.help or 'Value Error: %s must not be null' % self.name)
else:
return None
try:
if self.type:
if self.type in (list, dict) and isinstance(value, str):
value = json.loads(value)
assert isinstance(value, self.type)
elif self.type == bool and isinstance(value, str):
assert value.lower() in ['true', 'false']
value = value.lower() == 'true'
elif not isinstance(value, self.type):
value = self.type(value)
except (TypeError, ValueError, AssertionError):
raise ParseError(self.help or 'Type Error: %s type must be %s' % (
self.name, self.type))
if self.filter:
if not self.filter(value):
raise ParseError(
self.help or 'Value Error: %s filter check failed' % self.name)
if self.handler:
value = self.handler(value)
return value
# Parser base class
class BaseParser(object):
def __init__(self, *args):
self.args = []
for e in args:
if isinstance(e, str):
e = Argument(e)
elif not isinstance(e, Argument):
raise TypeError('%r is not instance of Argument' % e)
self.args.append(e)
def _get(self, key):
raise NotImplementedError
def _init(self, data):
raise NotImplementedError
def add_argument(self, **kwargs):
self.args.append(Argument(**kwargs))
def parse(self, data=None, clear=False):
rst = AttrDict()
try:
self._init(data)
for e in self.args:
has_key, value = self._get(e.name)
if clear and has_key is False and e.required is False:
continue
rst[e.name] = e.parse(has_key, value)
except ParseError as err:
return None, err.message
return rst, None
# JSON parser
class JsonParser(BaseParser):
def __init__(self, *args):
self.__data = None
super(JsonParser, self).__init__(*args)
def _get(self, key):
return key in self.__data, self.__data.get(key)
def _init(self, data):
try:
if isinstance(data, (str, bytes)):
# Only bytes need decoding; calling .decode() on a str would raise AttributeError.
if isinstance(data, bytes):
data = data.decode('utf-8')
self.__data = json.loads(data) if data else {}
else:
assert hasattr(data, '__contains__')
assert hasattr(data, 'get')
assert callable(data.get)
self.__data = data
except (ValueError, AssertionError):
raise ParseError('Invalid data type for parse')
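# Hypothetical usage sketch: validate a JSON body with the parser defined above.
#
#   parser = JsonParser(Argument('name'), Argument('age', type=int, required=False))
#   form, error = parser.parse('{"name": "spug", "age": "3"}')
#   if error is None:
#       print(form.name, form.age)   # -> spug 3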
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrapper for Resource Manager API client."""
from builtins import object
from googleapiclient import errors
from httplib2 import HttpLib2Error
from google.cloud.forseti.common.gcp_api import _base_repository
from google.cloud.forseti.common.gcp_api import api_helpers
from google.cloud.forseti.common.gcp_api import errors as api_errors
from google.cloud.forseti.common.gcp_api import repository_mixins
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
# TODO: This should have been 'cloudresourcemanager', however 'crm' is what was
# used in the existing server configuration, and will be difficult to change
# without breaking existing deployments. Consider fixing once an upgrade tool
# is created that can modify existing server configuration files.
API_NAME = 'crm'
class CloudResourceManagerRepositoryClient(
_base_repository.BaseRepositoryClient):
"""Cloud Resource Manager Respository."""
def __init__(self,
quota_max_calls=None,
quota_period=100.0,
use_rate_limiter=True,
credentials=None,
cache_discovery=False,
cache=None):
"""Constructor.
Args:
quota_max_calls (int): Allowed requests per <quota_period> for the
API.
quota_period (float): The time period to track requests over.
use_rate_limiter (bool): Set to false to disable the use of a rate
limiter for this service.
credentials (OAuth2Credentials): Credentials that will be used to
authenticate the API calls.
cache_discovery (bool): When set to true, googleapiclient will cache
HTTP requests to API discovery endpoints.
cache (googleapiclient.discovery_cache.base.Cache): instance of a
class that can cache API discovery documents. If None,
googleapiclient will attempt to choose a default.
"""
if not quota_max_calls:
use_rate_limiter = False
self._projects = None
self._organizations = None
self._folders = None
self._folders_v1 = None
self._liens = None
super(CloudResourceManagerRepositoryClient, self).__init__(
'cloudresourcemanager', versions=['v1', 'v2'],
quota_max_calls=quota_max_calls,
quota_period=quota_period,
use_rate_limiter=use_rate_limiter,
credentials=credentials,
cache_discovery=cache_discovery,
cache=cache)
# Turn off docstrings for properties.
# pylint: disable=missing-return-doc, missing-return-type-doc
@property
def projects(self):
"""Returns a _ResourceManagerProjectsRepository instance."""
if not self._projects:
self._projects = self._init_repository(
_ResourceManagerProjectsRepository)
return self._projects
@property
def organizations(self):
"""Returns a _ResourceManagerOrganizationsRepository instance."""
if not self._organizations:
self._organizations = self._init_repository(
_ResourceManagerOrganizationsRepository)
return self._organizations
@property
def folders(self):
"""Returns a _ResourceManagerFoldersRepository instance."""
if not self._folders:
self._folders = self._init_repository(
_ResourceManagerFoldersRepository, version='v2')
return self._folders
@property
def folders_v1(self):
"""Returns a _ResourceManagerFolderV1Repository instance."""
# Org Policy methods are only available on crm v1 currently.
if not self._folders_v1:
self._folders_v1 = self._init_repository(
_ResourceManagerFolderV1Repository, version='v1')
return self._folders_v1
@property
def liens(self):
"""Returns a _ResourceManagerLiensRepository instance."""
if not self._liens:
self._liens = self._init_repository(
_ResourceManagerLiensRepository)
return self._liens
# pylint: enable=missing-return-doc, missing-return-type-doc
class _ResourceManagerProjectsRepository(
repository_mixins.GetQueryMixin,
repository_mixins.GetIamPolicyQueryMixin,
repository_mixins.ListQueryMixin,
repository_mixins.OrgPolicyQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Cloud Resource Manager Projects repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_ResourceManagerProjectsRepository, self).__init__(
get_key_field='projectId', list_key_field=None,
max_results_field='pageSize', component='projects', **kwargs)
def get_ancestry(self, resource, **kwargs):
"""Get the project ancestory data.
Args:
resource (str): The project id or number to query.
**kwargs (dict): Additional parameters to pass through to
GetQueryMixin.get().
Returns:
dict: Response from the API.
"""
return repository_mixins.GetQueryMixin.get(
self, resource, verb='getAncestry', body=dict(), **kwargs)
@staticmethod
def get_name(project_id):
"""Format's an organization_id to pass in to .get().
Args:
project_id (str): The project id to query, either just the
id or the id prefixed with 'projects/'.
Returns:
str: The formatted resource name.
"""
if not project_id.startswith('projects/'):
project_id = 'projects/{}'.format(project_id)
return project_id
class _ResourceManagerOrganizationsRepository(
repository_mixins.GetQueryMixin,
repository_mixins.GetIamPolicyQueryMixin,
repository_mixins.OrgPolicyQueryMixin,
repository_mixins.SearchQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Cloud Resource Manager Organizations repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_ResourceManagerOrganizationsRepository, self).__init__(
key_field='name', max_results_field='pageSize',
search_query_field='filter', component='organizations', **kwargs)
@staticmethod
def get_name(organization_id):
"""Format's an organization_id to pass in to .get().
Args:
organization_id (str): The organization id to query, either just the
id or the id prefixed with 'organizations/'.
Returns:
str: The formatted resource name.
"""
if not organization_id.startswith('organizations/'):
organization_id = 'organizations/{}'.format(organization_id)
return organization_id
class _ResourceManagerFoldersRepository(
repository_mixins.GetQueryMixin,
repository_mixins.GetIamPolicyQueryMixin,
repository_mixins.ListQueryMixin,
repository_mixins.SearchQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Cloud Resource Manager Folders repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_ResourceManagerFoldersRepository, self).__init__(
list_key_field='parent', get_key_field='name',
max_results_field='pageSize', component='folders', **kwargs)
@staticmethod
def get_name(folder_id):
"""Format's an folder_id to pass in to .get().
Args:
folder_id (str): The folder id to query, either just the
id or the id prefixed with 'folders/'.
Returns:
str: The formatted resource name.
"""
if not folder_id.startswith('folders/'):
folder_id = 'folders/{}'.format(folder_id)
return folder_id
class _ResourceManagerFolderV1Repository(
repository_mixins.OrgPolicyQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Cloud Resource Manager Folders v1 repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_ResourceManagerFolderV1Repository, self).__init__(
list_key_field='parent', get_key_field='name',
max_results_field='pageSize', component='folders', **kwargs)
class _ResourceManagerLiensRepository(
repository_mixins.ListQueryMixin,
_base_repository.GCPRepository):
"""Implementation of Cloud Resource Manager Liens repository."""
def __init__(self, **kwargs):
"""Constructor.
Args:
**kwargs (dict): The args to pass into GCPRepository.__init__()
"""
super(_ResourceManagerLiensRepository, self).__init__(
list_key_field='parent', max_results_field='pageSize',
component='liens', **kwargs)
class CloudResourceManagerClient(object):
"""Resource Manager Client."""
def __init__(self, global_configs, **kwargs):
"""Initialize.
Args:
global_configs (dict): Forseti config.
**kwargs (dict): The kwargs.
"""
max_calls, quota_period = api_helpers.get_ratelimiter_config(
global_configs, API_NAME)
cache_discovery = global_configs[
'cache_discovery'] if 'cache_discovery' in global_configs else False
self.repository = CloudResourceManagerRepositoryClient(
quota_max_calls=max_calls,
quota_period=quota_period,
use_rate_limiter=kwargs.get('use_rate_limiter', True),
credentials=kwargs.get('credentials', None),
cache_discovery=cache_discovery,
cache=global_configs.get('cache'))
def get_project(self, project_id):
"""Get all the projects from organization.
Args:
project_id (str): The project id (not project number).
Returns:
dict: The project response object.
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
try:
result = self.repository.projects.get(project_id)
LOGGER.debug('Getting project, project_id = %s, result = %s',
project_id, result)
return result
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(project_id, e)
def get_projects(self, parent_id=None, parent_type=None, **filterargs):
"""Get all the projects the authenticated account has access to.
If no parent is passed in, then all projects the caller has visibility
to are returned. This is significantly less efficient than listing by
parent.
Args:
parent_id (str): The id of the organization or folder parent object.
parent_type (str): Either folder or organization.
**filterargs (dict): Extra project filter args.
Yields:
dict: The projects.list() response.
https://cloud.google.com/resource-manager/reference/rest/v1/projects/list#response-body
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
filters = []
for key, value in list(filterargs.items()):
filters.append('{}:{}'.format(key, value))
if parent_id:
filters.append('parent.id:{}'.format(parent_id))
if parent_type:
filters.append('parent.type:{}'.format(parent_type))
try:
for response in self.repository.projects.list(
filter=' '.join(filters)):
LOGGER.debug('Getting all the projects the authenticated'
' account has access to, parent_id = %s,'
' parent_type = %s, **filterargs = %s,'
' response = %s',
parent_id, parent_type, filterargs, response)
yield response
except (errors.HttpError, HttpLib2Error) as e:
if parent_id and parent_type:
resource_name = '{}/{}'.format(parent_type, parent_id)
else:
resource_name = 'All Projects'
raise api_errors.ApiExecutionError(resource_name, e)
def get_project_ancestry(self, project_id):
"""Get the full folder ancestry for a project.
Args:
project_id (str): Either the project number or the project id.
Returns:
list: The ancestors of the project, in order from direct parent to
root organization id.
"""
try:
results = self.repository.projects.get_ancestry(project_id)
ancestor = results.get('ancestor', [])
LOGGER.debug('Getting the full folder ancestry for a project,'
' project_id = %s, ancestor = %s',
project_id, ancestor)
return ancestor
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(project_id, e)
def get_project_iam_policies(self, project_id):
"""Get all the iam policies for a given project.
Args:
project_id (str): Either the project number or the project id.
Returns:
list: IAM policies of the project.
https://cloud.google.com/resource-manager/reference/rest/Shared.Types/Policy
"""
try:
results = self.repository.projects.get_iam_policy(project_id)
LOGGER.debug('Getting all the iam policies for a given project,'
' project_id = %s, results = %s', project_id, results)
return results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(project_id, e)
def get_project_org_policies(self, project_id):
"""Get all the org policies for a given project.
Args:
project_id (str): Either the project number or the project id.
Returns:
list: Org policies applied to the project.
https://cloud.google.com/resource-manager/reference/rest/v1/Policy
"""
resource_id = self.repository.projects.get_name(project_id)
try:
paged_results = self.repository.projects.list_org_policies(
resource_id)
flattened_results = api_helpers.flatten_list_results(
paged_results, 'policies')
LOGGER.debug('Getting all the org policies for a given project,'
' project_id = %s, flattened_results = %s',
project_id, flattened_results)
return flattened_results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(resource_id, e)
def get_organization(self, org_name):
"""Get organization by org_name.
Args:
org_name (str): The org name with format "organizations/$ORG_ID"
Returns:
dict: The org response object if found, otherwise False.
"""
name = self.repository.organizations.get_name(org_name)
try:
results = self.repository.organizations.get(name)
LOGGER.debug('Getting organization by org_name, org_name = %s,'
' results = %s', org_name, results)
return results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(org_name, e)
def get_organizations(self):
"""Get organizations that the authenticated account has access to.
Returns:
list: A list of Organization dicts as returned by the API.
"""
try:
paged_results = self.repository.organizations.search()
flattened_results = api_helpers.flatten_list_results(
paged_results, 'organizations')
LOGGER.debug('Getting organizations that the auth\'d account'
' has access to, flattened_results = %s',
flattened_results)
return flattened_results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError('All Organizations', e)
def get_org_iam_policies(self, org_id):
"""Get all the iam policies of an org.
Args:
org_id (int): The org id number.
Returns:
dict: Organization IAM policy for given org_id.
https://cloud.google.com/resource-manager/reference/rest/Shared.Types/Policy
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
resource_id = self.repository.organizations.get_name(org_id)
try:
results = self.repository.organizations.get_iam_policy(resource_id)
LOGGER.debug('Getting all the iam policies of an org, org_id = %s,'
' resource_id = %s, results = %s',
org_id, resource_id, results)
return results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(resource_id, e)
def get_org_org_policies(self, org_id):
"""Get all the org policies for a given org.
Args:
org_id (int): The org id number.
Returns:
list: Org policies applied to the organization.
https://cloud.google.com/resource-manager/reference/rest/v1/Policy
"""
resource_id = self.repository.organizations.get_name(org_id)
try:
paged_results = self.repository.organizations.list_org_policies(
resource_id)
flattened_results = api_helpers.flatten_list_results(
paged_results, 'policies')
LOGGER.debug('Getting all the org policies for a given org,'
' org_id = %s, resource_id = %s,'
' flattened_results = %s',
org_id, resource_id, flattened_results)
return flattened_results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(resource_id, e)
def get_folder(self, folder_name):
"""Get a folder.
Args:
folder_name (str): The unique folder name, with the format
"folders/{folderId}".
Returns:
dict: The folder API response.
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
name = self.repository.folders.get_name(folder_name)
try:
results = self.repository.folders.get(name)
LOGGER.debug('Getting folder by folder name, folder_name = %s'
' results = %s', folder_name, results)
return results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(folder_name, e)
def get_folders(self, parent=None, show_deleted=False):
"""Find all folders that the authenticated account has access to.
If no parent is passed in, then all folders the caller has visibility
to are returned. This is significantly less efficient than listing by
parent.
Args:
parent (str): Optional parent resource, either
'organizations/{org_id}' or 'folders/{folder_id}'.
show_deleted (bool): Determines if deleted folders should be
returned in the results.
Returns:
list: A list of Folder dicts as returned by the API.
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
if parent:
paged_results = self.repository.folders.list(
parent, showDeleted=show_deleted)
else:
query = ''
if not show_deleted:
query = 'lifecycleState=ACTIVE'
paged_results = self.repository.folders.search(query=query)
try:
flattened_results = api_helpers.flatten_list_results(
paged_results, 'folders')
LOGGER.debug('Getting all the folders that the auth\'d account'
' has access to, parent = %s, show_deleted = %s,'
' flattened_results = %s',
parent, show_deleted, flattened_results)
return flattened_results
except (errors.HttpError, HttpLib2Error) as e:
if parent:
resource_name = parent
else:
resource_name = 'All Folders'
raise api_errors.ApiExecutionError(resource_name, e)
def get_project_liens(self, project_id):
"""Get all liens for this project.
Args:
project_id (str): the id of the project.
Returns:
list: A list of Lien dicts as returned by the API.
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
project_id = self.repository.projects.get_name(project_id)
try:
paged_results = self.repository.liens.list(
project_id)
flattened_results = api_helpers.flatten_list_results(
paged_results, 'liens')
return flattened_results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(project_id, e)
def get_folder_iam_policies(self, folder_id):
"""Get all the iam policies of a folder.
Args:
folder_id (int): The folder id.
Returns:
dict: Folder IAM policy for given folder_id.
Raises:
ApiExecutionError: An error has occurred when executing the API.
"""
resource_id = self.repository.folders.get_name(folder_id)
try:
results = self.repository.folders.get_iam_policy(resource_id)
LOGGER.debug('Getting all the iam policies of a folder,'
' folder_id = %s, results = %s', folder_id, results)
return results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(resource_id, e)
def get_folder_org_policies(self, folder_id):
"""Get all the org policies for a given folder.
Args:
folder_id (int): The folder id.
Returns:
list: Org policies applied to the folder.
https://cloud.google.com/resource-manager/reference/rest/v1/Policy
"""
resource_id = self.repository.folders.get_name(folder_id)
try:
paged_results = self.repository.folders_v1.list_org_policies(
resource_id)
flattened_results = api_helpers.flatten_list_results(
paged_results, 'policies')
LOGGER.debug('Getting all the org policies of a given folder,'
' folder_id = %s, flattened_results = %s',
folder_id, flattened_results)
return flattened_results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(resource_id, e)
def get_org_policy(self, resource_id, constraint, effective_policy=False):
"""Get a specific org policy constraint for a given resource.
Args:
resource_id (str): The organization, folder or project resource to
query
constraint (str): The org policy constraint to query.
effective_policy (bool): If set to true, query the effective policy
instead of the currently set policy. This takes the resource
hierarchy into account.
Returns:
dict: An org policy resource.
Raises:
ValueError: Raised if the resource_id is not valid.
"""
org_policy_method = None
repository = None
if resource_id.startswith('folders/'):
repository = self.repository.folders_v1
elif resource_id.startswith('organizations/'):
repository = self.repository.organizations
elif resource_id.startswith('projects/'):
repository = self.repository.projects
else:
raise ValueError(
'resource_id is not a valid resource: %s' % resource_id)
if effective_policy:
org_policy_method = getattr(repository, 'get_effective_org_policy')
else:
org_policy_method = getattr(repository, 'get_org_policy')
try:
results = org_policy_method(resource_id, constraint)
LOGGER.debug('Getting a specific org policy constraint for a given'
' resource, resource_id = %s, constraint = %s,'
' effective_policy = %s, results = %s',
resource_id, constraint, effective_policy, results)
return results
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(resource_id, e)
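# Hypothetical usage sketch (global_configs keys come from the Forseti server
# configuration and are only illustrative here):
#
#   client = CloudResourceManagerClient(global_configs=forseti_global_configs)
#   project = client.get_project('my-project-id')
#   iam_policy = client.get_project_iam_policies('my-project-id')
#   for page in client.get_projects(parent_type='organization', parent_id='1234567890'):
#       for proj in page.get('projects', []):
#           print(proj['projectId'])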
|
import os
import argparse
from os.path import expanduser
import numpy as np
import configparser
import shutil
import pandas as pd
def get_file_names(dataset_path):
"""
get_file_names(dataset_path) -> (lgg_dirs, hgg_dirs)
It returns the paths of the LGG and HGG patient folders present in the BraTS dataset
"""
lgg_dir = []
hgg_dir = []
for r, d, f in os.walk(dataset_path):
if 'LGG' in r:
for folder in d:
lgg_dir.append(os.path.join(r, folder))
elif 'HGG' in r:
for folder in d:
hgg_dir.append(os.path.join(r, folder))
return lgg_dir, hgg_dir
def get_rdm_indexes(ratio, length):
indexes = np.arange(0, length)
# shuffle
np.random.seed(seed=42)
np.random.shuffle(indexes)
return indexes[int(ratio * length):], indexes[:int(ratio * length)]
def create_symlinks(tr_hgg, te_hgg, tr_lgg, te_lgg):
cwd = os.getcwd()
dir_contents = dict(zip(['HGG_train', 'HGG_test', 'LGG_train', 'LGG_test'], [tr_hgg, te_hgg, tr_lgg, te_lgg]))
# for dataset type train test LGG or HGG
for d, sub_dirs in dir_contents.items():
dst_prefix = '{0}/dataset/{1}'.format(cwd, d)
shutil.rmtree(dst_prefix)
os.makedirs(dst_prefix)
# for each directory (patient)
for src in sub_dirs:
all_files = os.listdir(src)
# for each file type per patient (t1,t2,t1ce,flair,seg)
for file in all_files:
#print('{0}/{1}'.format(src, file))
os.symlink('{0}/{1}'.format(src, file), '{0}/{1}'.format(dst_prefix, file))
def create_symlinks_all_data(hgg, lgg):
cwd = os.getcwd()
dir_contents = dict(zip(['HGG', 'LGG'], [hgg, lgg]))
# for dataset type train test LGG or HGG
for d, sub_dirs in dir_contents.items():
dst_prefix = '{0}/dataset/{1}'.format(cwd, d)
shutil.rmtree(dst_prefix)
os.makedirs(dst_prefix)
# for each directory (patient)
for src in sub_dirs:
all_files = os.listdir(src)
# for each file type per patient (t1,t2,t1ce,flair,seg)
for file in all_files:
#print('{0}/{1}'.format(src, file))
os.symlink('{0}/{1}'.format(src, file), '{0}/{1}'.format(dst_prefix, file))
print('src', '{0}/{1}'.format(src, file))
print('dst', '{0}/{1}'.format(dst_prefix, file))
def create_symlinks_gt_brats(hgg, lgg, year=17):
#cwd = os.getcwd()
dir_contents = dict(zip(['HGG', 'LGG'], [hgg, lgg]))
dest_dir = '/mnt/SYS866/data/gt{0}'.format(year)
# for dataset type train test LGG or HGG
for d, sub_dirs in dir_contents.items():
dst_prefix = '{0}/{1}'.format(dest_dir, d)
shutil.rmtree(dst_prefix)
os.makedirs(dst_prefix)
# for each directory (patient)
for src in sub_dirs:
all_files = os.listdir(src)
dst_folder = dst_prefix + '/{0}'.format(src.split('/')[-1])
print(dst_folder)
os.makedirs(dst_folder)
# for each file type per patient (t1,t2,t1ce,flair,seg)
for file in all_files:
#print('{0}/{1}'.format(src, file))
if 'seg' in file:
os.symlink('{0}/{1}'.format(src, file), '{0}/{1}'.format(dst_folder, file))
print('src', '{0}/{1}'.format(src, file))
print('dst', '{0}/{1}'.format(dst_folder, file))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_dir", type=str, default="mnt/dataset/MICCAI_BraTS_2019_Data_Training/", help="path to dataset")
parser.add_argument("--model_name", type=str, default="anisotropic_nets_brats_challenge", help="niftynet model name to use")
opt = parser.parse_args()
print(opt)
data_dir = opt.dataset_dir
modelname = opt.model_name
lgg, hgg = get_file_names(data_dir)
test_ratio = 0.1
train_hgg_idx, test_hgg_idx = get_rdm_indexes(test_ratio, len(hgg))
train_lgg_idx, test_lgg_idx = get_rdm_indexes(test_ratio, len(lgg))
# for i in [lgg[idx].split('/')[-1] for idx in test_lgg_idx]:
# print('{0},inference'.format(i))
# for i in [hgg[idx].split('/')[-1] for idx in test_hgg_idx]:
# print('{0},inference'.format(i))
# for i in [hgg[idx].split('/')[-1] for idx in test_hgg_idx]:
# print('HGG/{0}'.format(i))
#
# for i in [lgg[idx].split('/')[-1] for idx in test_lgg_idx]:
# print('LGG/{0}'.format(i))
# for i in [hgg[idx].split('/')[-1] for idx in train_hgg_idx]:
# print('HGG/{0}'.format(i))
test17 = pd.read_csv('patients_in19_not_in17.csv')
print(test17.columns)
train19_hgg_dir = [hgg[idx] for idx in train_hgg_idx]
test19_hgg_dir = [hgg[idx] for idx in test_hgg_idx]
train19_lgg_dir = [lgg[idx] for idx in train_lgg_idx]
test19_lgg_dir = [lgg[idx] for idx in test_lgg_idx]
print('Size of TRAIN dataset : HGG => {0} LGG => {1}'.format(len(train19_hgg_dir), len(train19_lgg_dir)))
print('Size of TEST dataset : HGG => {0} LGG => {1}'.format(len(test19_hgg_dir), len(test19_lgg_dir)))
#create_symlinks(train_hgg_dir, test_hgg_dir, train_lgg_dir, test_lgg_dir)
#create_symlinks_all_data(hgg, lgg)
test17_hgg_dir = [data_dir+x for x in test17.patient_id.values if 'HGG' in x]
test17_lgg_dir = [data_dir + x for x in test17.patient_id.values if 'LGG' in x]
create_symlinks_gt_brats(test17_hgg_dir, test17_lgg_dir)
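# Example invocation (the script name and paths are illustrative):
#
#   python prepare_brats_links.py \
#       --dataset_dir /mnt/dataset/MICCAI_BraTS_2019_Data_Training/ \
#       --model_name anisotropic_nets_brats_challenge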
|
"""Constants used in regular expressions."""
# Sentinel value.
# Useful to differentiate between an argument that has not been provided, and an
# argument provided with the value `None`.
SENTINEL_VALUE = object()
UNQUOTED_STRING = r'[^\s]+'
CAPTURE_UNQUOTED_STRING = r'[^\s]+'
ESCAPED_QUOTED_STRING = r""""(?:[^"\\]|\\.)*"|'(?:[^'\\]|\\.)*'"""
# Using this will capture a string with their surrounding quotes.
# e.g. "can't" will be captured as `"can't"` instead of `can't`.
CAPTURE_ESCAPED_QUOTED_STRING = r"({})".format(ESCAPED_QUOTED_STRING)
QUOTED_STRING = r"""(?:'|").*(?:'|")"""
CAPTURE_QUOTED_STRING = r"""(?:'|")(.*)(?:'|")"""
AVAILABLE_REGEXES = {
'UNQUOTED_STRING': UNQUOTED_STRING,
'CAPTURE_UNQUOTED_STRING': CAPTURE_UNQUOTED_STRING,
'CAPTURE_STRING': CAPTURE_QUOTED_STRING,
'STRING': QUOTED_STRING,
}
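# Example usage (illustrative input): extract the inner text of a quoted token.
#
#   import re
#   match = re.match(CAPTURE_QUOTED_STRING, '"hello world"')
#   if match:
#       print(match.group(1))   # -> hello world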
|
data = {'GoqMLRDAbGWn': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_lmpFmMCm}', 'FChsdhyFoJb7': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_OuEgWt8G}', 'cxjQDMhRaGEi': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_OnKirjt3}', '6R9gh04xijvL': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_KOodydBm}', '6XNGpyFKD8UI': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_E0c1sc1k}', 'z9U4w5M6iAhr': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_wJdqBIFZ}', 'zLxUq06hQZas': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_rW01tYwA}', 'W9V8ydNaojIj': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_UP544cZN}', 'MH8QOCHGUH7L': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_0Wigfnq7}', 'Y0dtX9sn2wBF': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_HLNDsdU4}', 'uZMpWdOzZhVh': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_F1uUl8Py}', 'h84K3u8RCCiV': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_SAlDd2T6}', 'c0DHKU8K7F0M': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_aNt3dzvV}', 'IpKWgxHCAwzD': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_vNNUFeqg}', '6M3F3BYLI9d6': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_qyU2oub8}', 'zIkPNBX1am9j': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_S23NJfYl}', 'NujQa1cM9kXr': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_r6sNDBRf}', 'C88xyVgLYYc4': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_riAP4fvJ}', 'tLnKRgxuFVHN': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_iUr3Spfm}', 'H7951eZqN8Vu': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_SW0pyvoc}', 'JcTFjJEuqVIJ': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_R28IyQD2}', 'rBg1DQWi64PL': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_aALdguGb}', 'njYmPguFUtgy': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_knxCGRXg}', 'nUs4QURaftXM': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_f7eM6CZ2}', 'Jm0SbxuPDgYJ': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_lCyWXrzS}', 'KBmlb3ZWJPTX': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_o742Txym}', 'VJxrqGrMDb9D': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_Kr7Xxxa7}', 'DSTinL4ZJpqy': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_H1SKtwlf}', 'j4yqLHYMYfK7': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_Fy2juCqm}', 'FusGT40nnN3V': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_K6m63lVi}', 'nnb8AfRSQcTh': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_fjWTHmOI}', 'O0qqOPvqjiWO': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_JEjLNLN2}', 'cwS6OMc6m9WY': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_OCVSIkhN}', 'vqsYMQGtepUU': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_I9mY53rY}', 'MMxktobusSAW': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_wcIzMw05}', 'A2H2U3T7pF5Q': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_oMs6xVZ5}', 'ygNHeAbA4xIN': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_sMWqRf3P}', '6zgpDwdSAOgd': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_M1zdtE7v}', 'BMfgwoceGm7v': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_nFvdHlZC}', 'QQxuYppa0zyh': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_PDEQnRgP}', 'BbZkcNoLdf1d': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_Un0j2ntL}', '27zSI81RxphZ': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_fOtv6NBu}', 'WQje8FOky6P4': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_V9Ibwof6}', 'ZlSztvmMIV9D': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_cFj2crjI}', 'Iyv0FYZIC6wv': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_2BHV1U9J}', 'PIX4pJ3072Ld': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_ly8WweWV}', 'cAInnUlE1Z1V': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_zZG2MRs5}', 'rMpD7g854F8l': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_Jp3msWlv}', 'kEa9fQTjBCDc': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_LzCS1sD0}', '96KZXZEDwcqW': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_rj4T3VEM}', '90C4no86uaN0': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_AkGlBlr1}', 'Fn7q0pBVYfz5': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_bhoSHuww}', 'OlAywRjHwbql': 
'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_J053FSCx}', 'jBx2lkhR73OV': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_iyE9u9RC}', 's65Rr43cWIfO': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_0KuOxW8i}', 'EFhxbTBynjHH': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_DOV9c9vg}', '0fl27x1jDhVw': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_WHmfprJx}', 'GodNiA1SKwRk': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_jPh0p3UQ}', '0QiydH14153S': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_UsbsoWcl}', 'AGy3ewA1W4fz': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_9RUzBz88}', 'Gi1dwhjlD5lp': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_dV5eh26Q}', '32ISB2G2rbQy': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_vGLgDmEO}', '3OoU3HUmuaLq': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_4t0aZoJ8}', 'lYODemC5joBR': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_HVcobM4l}', 't1VY3eHhlWn2': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_XRYMXBEB}', '2wI0rCj9Gc4X': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_jWas99sa}', 'eeD3Eq9D9qeC': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_YDLPnced}', 'zbue0FXBa4zl': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_mIGPkx2D}', 'z0ccfeYvjANQ': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_H1YDHX7e}', 'NdSbqerR24hx': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_R4i94UiN}', '0M4HOLz4ufSk': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_jC4DnejJ}', 'quwtODoE5uWj': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_tTE0VEjh}', 'U5MCOxvpoP1o': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_ohqMEKgh}', '3YYX7yoo1mcr': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_XVp78tBI}', 'WwgDMsqD5OcD': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_BPylqC8x}', '49d7ubGbdtBD': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_x4bZJjwK}', '9t27PWgdH65i': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_6kPAqnUM}', 'JVKhOMmPYtVd': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_SgiaeADQ}', 'yOLLPHXvGQro': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_VW2FnuQg}', 'QPX0zGRlcj4n': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_hQqp9xuF}', 'J2GFvqjd5tg7': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_35oJ14kS}', 'bCJmajSFPRw4': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_mG9MbaP4}', 'UeD2Am9VdmVV': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_WYVyxI8n}', 'OVTwjY7pB6Zi': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_P3DFL2pg}', 'FSmDEK59OgBc': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_KcbFJWfL}', 'Y14LGjESpaeQ': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_EK2Wwrfu}', '0hk9Qfr28pfm': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_NdZvFSO8}', 'ExJRFl92Iydb': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_L5Rco7vZ}', 'DNPmygOABptD': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_qvhwQPvS}', 'qiIp0rVEc5zM': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_1ZJQ3DPK}', 'p5X5zkhdvpr4': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_nASs2w9m}', 'wFULSfUyI3dE': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_jVU45hTT}', '17PtYpehq7KN': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_mYd37cH0}', 'zIOgSmsZZ1hV': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_DFE05fw9}', 'OEtTKKucMaHg': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_5Xebukq0}', 'gvgiDRR4l7PO': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_nWAl1P2H}', 'FC1Qy3EOJDmi': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_R25qtkWQ}', 'I1TTnu6CvQLg': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_2WZqftfo}', 'qBtYm4tlClOn': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_5DLEuA7x}', 'K1iWSLUA7TjU': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_noGLbQlN}', 'XDQfuzgYqvpD': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_9ApeptKU}', 'lucbamSPR6o6': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_LmSbT4hJ}', '7EHChnbyQkYA': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_HYEks4HJ}', 'sOL4BqCK7sFC': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_m6FEXo5e}', 'z9ek0EqjnblC': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_3gyWL6IK}', 'QDEW2hxzimEL': 
'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_0VEWsk88}', 'pRHnMMIZHT08': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_ow3UWKX6}', 'jptkzdK8jtD3': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_WJZNvZZS}', 'NXW9YE1OIXZr': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_trB5on4j}', 'm6BzhExlSQu8': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_mbrPTOHP}', 'NSJ9awoddIaD': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_hK2XMOMR}', 'YJRCRpRKzTYu': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_1nEUGAyR}', 'UHjM5dyheO1g': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_RF9HsQXH}', '7cgL2cRgZKsh': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_KCXpAfGZ}', 'EpnSD0nRTXcc': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_atIpjH1l}', '6xHLCKRKKjNd': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_4B55YTom}', 'bXbeVHQGemB6': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_r6Vh39f7}', '6Vt8UD1nJvCf': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_VKCQnQ0Z}', 'v3dGNGOxAzEK': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_1MKjebf1}', 'xV4CIkUQWfQy': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_A5lniKJC}', 'yo6MyFO37eg2': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_NNOR8N0B}', 'Z4IT3S6h6C41': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_zNwldW6p}', 'rc9Pj5sJK2aT': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_R2dNr6F8}', 'D7K2FZm8xblo': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_EAViBuc4}', 'BSRpVJV7oPJ7': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_HT8E8NMd}', '7llltu8bHSZz': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_9cEYBSiQ}', 'nxBkkao1X7pU': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_QLUVIXD5}', 'osYcLaHH3w3h': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_c95X82BU}', 'LRs3sGuiuR0H': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_zgEuluZR}', 'gCQ5EdzEse0s': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_kqRVbogy}', 'orZmOn0840Eu': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_uHdIWhrb}', 'TnZHEcBJckdg': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_PFvdl20L}', 'N8CYH9gxiKjn': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_OJzTEcgd}', 'AlZ1u5DCVdTX': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_HgYjjpxP}', '5VP5jYtijjfB': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_E6M7b36T}', 'cpdluUmCYvTO': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_U5KHmNSJ}', 'SXjF53o7z1w5': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_4T5wS7CI}', 'YKh49Cq7J95c': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_jn7Ok2cv}', 'rsCOne2If67y': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_HZdAieMb}', '7whNnPVlbs6R': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_vwitLxL3}', '0zQMtusnatPy': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_mgSKf2J8}', '79DT5e7Zu60V': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_4oiez4CN}', 'u9MFMVzba7QF': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_Mz7VmDdx}', 'yaiVVL9OOUov': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_CMgB5Mtv}', 'fBQyjNQgVFYa': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_uaLF9EzN}', 'AJFPfs4hpqi9': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_O8HJM5Jg}', 'otToAsehSohC': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_inYCAggq}', '8gvs825PvNAi': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_PGK7CsVV}', 'pxvSZk8fmjY7': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_ywmGeiBL}', 'Y9SJ35wrJQ96': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_HSlq5ajJ}', 'kGSl8hmBok2v': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_2WYXn5XZ}', 't2cLfQzHnjPF': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_fDkREvel}', 'bKfAXXVREByy': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_mbS3XP9O}', 'ymbbqTb0MyfA': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_Jyp3wyX2}', 'bHIFc9KX9JDU': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_1HAuhW67}', 'kI5urDV1Lbao': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_sd03C1b0}', '0RW7AYPb3c3P': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_aLUWDKpF}', 'YmdgONKD8sG0': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_Yb3IyZEF}', 'Rcs0egVus0Pj': 
'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_ZfCTs7sm}', 'gtTqyrcSinxh': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_oyReCrC7}', 'WV5m5h2pMCSs': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_zPKvWX2P}', 'pLhbNOXXTy66': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_W0lfFuXu}', 'lK9Vk9Mubj2D': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_nrz2gM1Z}', 'BbqmnQcDuQum': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_h2kj4AKy}', 'J9LfQvgvqcfL': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_8sMX709Q}', 'bODeHNkvOvuo': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_6EdrgCxp}', 'Xy7K88imNnzJ': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_sNMUt0lb}', 'fCooUPgEJQAK': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_YpBV2S1s}', '5hWsBwdLCsLE': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_rslFyXwy}', 's7YgYYJhter3': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_uXrRTIGS}', 'SeJFvE6VqWsM': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_pbtmib1u}', '5e4lwIunlfpj': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_LfxPIlBy}', '1QycjAJBrUjL': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_TYW7c2N9}', 'UM5iRsUXivOY': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_D3nYtN1v}', 'Rs3q5CuPEj1O': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_Vaza5jXc}', 'cMIpUz6VmfRA': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_ZJ7lKsOH}', 'RSYw0mMqmbjW': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_VkXeVBBN}', 'HSNTx1LIVC1g': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_AWaAKP7X}', 'I0Rbh3n4OY98': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_GsK9hEq2}', 'QlmklPBfUcL2': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_z2KEKpSK}', 'jwFpePserw86': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_XtaznnNA}', 'dksmUIoxigr2': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_0eurs2id}', 'EGjCWJqPMVwJ': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_PPXoJqwq}', 'frN2m9JUF440': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_qGdE1fRb}', 'mXHQD5uJTeuL': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_hrIXvyCM}', 'XlqpBxnuCSPT': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_oygYE1pO}', 'GFjE74czcnSj': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_TNlmv3Fl}', 'sgSN7UH5Ouex': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_a9pzlNvs}', 'r7ioonGscyLh': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_gTOXiOPi}', 'L5J9bYoYfdsN': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_IiHEMvAj}', 'U18V11waUNKN': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_UZlc5nAk}', '9E53hCTR7ZIA': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_m7GL3b9a}', 'QsVFVnYB4lva': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_Wf2CrNO1}', 'HWKExQKCZlXs': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_gwMdLomK}', 'biTYZ1lu1hrd': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_zhsqW5FQ}', 'IZ8HRJyVRW42': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_ZOuv7HLD}', 'TyvSPz75qzIS': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_SYBkONdq}', 'riVrfGTVDGhw': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_NGe5sqlm}', 'Nby8NeGcaWap': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_hwjI26QS}', 'ZlF18lq5x4OA': 'LKSHL{th1s_is_a_trivial_t4sk_on_a_CTF_8saSWAP8}'}
|
from rest_framework.exceptions import MethodNotAllowed
from rest_framework.permissions import IsAuthenticated
from app.viewsets import AppViewSet
from homework.api.filtersets import AnswerFilterSet
from homework.api.permissions import (
MayDeleteAnswerOnlyForLimitedTime, ShouldBeAnswerAuthorOrReadOnly, ShouldHavePurchasedQuestionCoursePermission)
from homework.api.serializers import AnswerCreateSerializer, AnswerTreeSerializer
from homework.models import Answer, AnswerAccessLogEntry
class AnswerViewSet(AppViewSet):
queryset = Answer.objects.for_viewset()
serializer_class = AnswerTreeSerializer
serializer_action_classes = {
'create': AnswerCreateSerializer,
}
lookup_field = 'slug'
permission_classes = [
IsAuthenticated &
ShouldHavePurchasedQuestionCoursePermission &
ShouldBeAnswerAuthorOrReadOnly &
MayDeleteAnswerOnlyForLimitedTime,
]
filterset_class = AnswerFilterSet
def update(self, request, **kwargs):
raise MethodNotAllowed(request.method)
def get_queryset(self):
queryset = super().get_queryset()
queryset = self.limit_queryset_to_user(queryset)
return self.limit_queryset_for_list(queryset)
def limit_queryset_to_user(self, queryset):
if not self.request.user.has_perm('homework.see_all_answers') and self.action != 'retrieve':
return queryset.for_user(self.request.user)
return queryset
def limit_queryset_for_list(self, queryset):
if self.action == 'list':
return queryset.root_only()
return queryset
def get_object(self):
"""Write a log entry for each answer from another user that is retrieved
"""
instance = super().get_object()
self.write_log_entry(answer=instance)
return instance
def write_log_entry(self, answer):
if not self.request.user.has_perm('homework.see_all_answers'):
if answer.author != self.request.user:
AnswerAccessLogEntry.objects.get_or_create(
user=self.request.user,
answer=answer,
)
|
import pickle
from intersim.datautils import *
from intersim.graph import InteractionGraph
from intersim.collisions import *
import numpy as np
import torch
LOCATIONS = ['DR_USA_Roundabout_FT',
'DR_CHN_Roundabout_LN',
'DR_DEU_Roundabout_OF',
'DR_USA_Roundabout_EP',
'DR_USA_Roundabout_SR']
def add_edges(neighbor_dict,edges):
for (i,j) in edges:
if j not in neighbor_dict[i]:
neighbor_dict[i].append(j)
def main(locnum, tracks, settings, frames):
name = LOCATIONS[locnum]
outdir = './experiments/results/'
v_target = 11.17
for setting in settings:
print(setting)
# collisions
# hand-counted
# if setting == 'idm':
# collisions = torch.tensor([36., 23., 34., 21., 24.]) ## VERY WRONG - true = [99,30,47,54,49]
# elif setting == 'cnash':
# collisions = torch.tensor([0., 5., 2., 12., 5.]) ## pretty close - true = [0,5,2,11,6]
# elif setting == 'decnash':
# collisions = torch.tensor([0., 0., 0., 0., 1.]) ## right
# actual
cs = []
for track in tracks:
fullname=name+'_track%03i_%s_frames%04i'%(track,setting,frames)
s = torch.load(outdir+fullname+'_states.pt')
l = torch.load(outdir+fullname+'_lengths.pt')
w = torch.load(outdir+fullname+'_widths.pt')
c = count_collisions(s, l, w)
cs.append(c)
collisions = torch.tensor(cs)
print("Collisions: %7.4f \pm %7.4f" %(collisions.mean(), collisions.std()/(len(collisions)**.5)))
# shortfall
vs = []
for track in tracks:
fullname=name+'_track%03i_%s_frames%04i'%(track,setting,frames)
filename=outdir+fullname+'_states.pt'
s = torch.load(filename)
s = s.reshape(s.shape[0], -1, 5)
v = s[:,:,2]
for i in range(v.shape[1]):
nni = ~torch.isnan(v[:,i])
v_car = v[nni,i]
if len(v_car)>0:
vs.append(v_car.mean().item())
vs = np.array(vs)
print("Velocity Shortfall: %7.4f \pm %7.4f" %(v_target-vs.mean(), vs.std()/(len(vs)**.5)))
# players
ps = []
if setting == 'idm':
players = torch.tensor([1., 1.])
else:
for track in tracks:
fullname=name+'_track%03i_%s_frames%04i'%(track,setting,frames)
filename=outdir+fullname
graph_list=pickle.load(open(filename+'_graphs.pkl', 'rb'))
states = torch.load(filename+'_states.pt')
states_reshaped = states.reshape(states.shape[0], -1, 5)
for i in range(1,len(graph_list)-1):
edges = graph_list[i]
state = states_reshaped[i]
v = state[:,2]
nni = ~torch.isnan(v)
neighbor_dict = {i:[] for i in range(len(v)) if nni[i]}
add_edges(neighbor_dict, edges)
g = InteractionGraph(neighbor_dict)
sccs = g.strongly_connected_components
ps.append(max([0]+[len(scc[0]) for scc in sccs]))
players = torch.tensor(ps, dtype=torch.float)
print("Players: %6.4f \pm %6.4f" %(players.mean(), players.std()))
# times
ts = []
for track in tracks:
fullname=name+'_track%03i_%s_frames%04i'%(track,setting,frames)
filename=outdir+fullname+'_times.csv'
ts.append(np.loadtxt(filename)[1:])
times = np.concatenate(ts)
print("Times: %6.4f \pm %6.4f" %(times.mean(), times.std()))
print("")
if __name__ == "__main__":
locnum = 0
tracks = list(range(5))
settings = ['idm','cnash','decnash']
frames = 1000
main(locnum, tracks, settings, frames)
|
import matplotlib.pyplot as plt
import numpy as np
def fgairing(n):
from math import factorial
f = [0]*(n+1)
for j in range(1, n+1):
t1 = 1./((n-1)*factorial(n-1))
t2 = sum([1./factorial(e) for e in range(j, n)])
t3 = sum([1./factorial(e) for e in range(1, n)])
f[j] = factorial(j-1) * (t1 + t2) / (t1 + t3)
return f
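# Sanity note (my observation, not from the original source): for j = 1 the sums
# t2 and t3 coincide, so f[1] == 1.0 for every n >= 2. That matches the usual
# normalization of a distribution rule and gives a quick check on the formula above.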
def fMC(n): return [0, 1] + [0]*(n-1)
def setw(n): return [0] + [1]*n
def tradeoffFigf():
def pobset(C):
from math import factorial
suml = 30
sumval = 0
for i in range(suml):
tfact = factorial(i)
tsum = 0
for j in range(1, i+1):
tsum += factorial(i)/factorial(j)
sumval += max(tfact - (1-C)*tsum/C, 0)
return (sumval + 1.)**(-1)
step = .001
poa = np.arange(step, .634, step)
pob = [pobset(v) for v in poa]
plt.plot(poa, pob, 'k')
plt.title('PoA v.s. PoB tradeoff')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel('PoA(f)')
plt.ylabel('PoB(f, 1)')
plt.show()
class SimFig():
@classmethod
def randomResActions(cls, n):
values = list(np.round(np.random.uniform(size=2*n), 1))
all_actions = []
for i in range(n):
actions = [[0], [0]]
actions[0][0] = np.random.choice(range(n)) + 1
actions[1][0] = np.random.choice(range(n, 2*n)) + 1
actions = [[0]] + actions
all_actions += [actions]
values = [0.] + values
return values, all_actions
@classmethod
def bestResp(cls, Game, i, act):
best = 0
bestUtil = 0
act = np.copy(act)
for j in range(len(Game.actions[i])):
act[i] = j
util = Game.U_i(i, act)
if util > bestUtil:
best = j
bestUtil = util
act[i] = best
return act
@classmethod
def brPath(cls, Game, initial, plySeq):
steps = len(plySeq)
actions = [initial] + [None]*steps
for c in range(1, steps+1):
actions[c] = cls.bestResp(Game, plySeq[c-1], actions[c-1])
return actions
@classmethod
def wRound(cls, WG, K, n): return [WG.welfare(act) for act in cls.brPath(WG, [0]*n, list(range(n))*K)]
@classmethod
def getSamples(cls, samples, K, n):
from games.types.wresource import WResourceFactory
wdataMC = np.zeros((samples, K*n+1))
wdataGar = np.zeros((samples, K*n+1))
diff = np.zeros((samples, K*n+1))
for c in range(samples):
values, all_actions = cls.randomResActions(n)
WgMc = WResourceFactory.make_game(all_actions, values, setw(n), fMC(n))
WgGar = WResourceFactory.make_game(all_actions, values, setw(n), fgairing(n))
wdataMC[c, :] = cls.wRound(WgMc, K, n)
wdataGar[c, :] = cls.wRound(WgGar, K, n)
diff[c, :] = [e1/e2 if e2 > 0 else 1 for e1, e2 in zip(cls.wRound(WgMc, K, n), cls.wRound(WgGar, K, n))]
print(c)
return wdataMC, wdataGar, diff
@classmethod
def plotBRavg(cls):
n = 10
K = 4
SAMP = 100
steps = range(K*n+1)
_, _, diffresults = cls.getSamples(SAMP, K, n)
diffavg = np.mean(diffresults, 0)
mindiffround = np.amin(diffresults, 0)
maxdiffround = np.amax(diffresults, 0)
plt.plot(steps, diffresults.T, 'r.', alpha=.1)
plt.plot(steps, diffavg, 'k--')
plt.plot(steps, mindiffround, 'r', alpha=.5)
plt.plot(steps, maxdiffround, 'r', alpha=.5)
plt.plot([0, K*n+1], [1, 1], 'k', alpha=.2)
for c in range(K+1):
plt.plot([c*n, c*n], [0, 2], 'k', alpha=.2)
ax = plt.gca()
ax.set_xlim([0, K*n])
ax.set_ylim([.7, 1.4])
#plt.grid()
plt.show()
def plotcurve():
dx = .01
rangex = np.arange(0, 1+dx, dx)
rangePOBopt = 1 - rangex/2.
rangePOAopt = 1 - rangex/np.exp(1)
rangeMC = (1 + rangex)**-1
plt.plot(rangex, rangePOBopt)
plt.plot(rangex, rangePOAopt)
plt.plot(rangex, rangeMC)
plt.xlim(0, 1)
plt.ylim(0.5, 1)
plt.show()
def abcovering(n, a, b):
from math import factorial
p = (1 - a*b**b*np.exp(-b)/factorial(b))**-1
f = [0, 1.]
for i in range(1, n):
val1 = 1 - a
Vab = (1-a)*i + a*min(i, b)
val2 = 1/b*(i*f[i] - Vab*p) + 1
val = max(val1, val2 - .001)
f.append(val)
w = [0] + [(1-a)*i + a*min(i, b) for i in range(1, n+1)]
return w, f
def tradeoffcurv():
from games.price_of_anarchy import res_opt_f
from asymopt import getPoB
dc = .01
n = 40
ccs = np.arange(0, 1+dc, dc)
pobs = [0]*len(ccs)
poaccs = 1 - ccs/2
for i, c in enumerate(ccs):
w, f = abcovering(n, c, 1)
print('HHHHEEEEEEELLLLLOOOOOOOO', c, w, f)
pobs[i] = getPoB(w, f, n)[0]**(-1)
# print(pobs)
plt.plot(ccs, poaccs)
plt.plot(ccs, pobs)
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.show()
def submod():
x = range(0, 11)
y = [0]*11
for i in range(10):
y[i+1] = y[i] + 3/(4*x[i+1]) + 1/4
plt.plot(x, y, '.-')
plt.xlim(0, 10)
plt.ylim(0, 5)
plt.show()
def plotcurve2():
dx = .01
rangex = np.arange(0, 1+dx, dx)
rangePOBopt = 1 - rangex/2.
rangePOAopt = 1 - rangex/np.exp(1)
rangeMC = (1 + rangex)**-1
plt.plot(rangex, (rangePOAopt - rangeMC)/rangeMC)
plt.plot(rangex, (rangePOBopt - rangeMC)/rangeMC)
plt.xlim(0, 1)
plt.ylim(0, .3)
plt.show()
SimFig.plotBRavg()
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 30 13:35:44 2018
@author: charlie
Script to export study subcatchment attribute table to .csv
"""
from grass.pygrass.modules.shortcuts import database as db
basins = 'all_unique_wsheds_info'
csv_name = 'basins_data_with_intra_relief.csv'
db.out_ogr(input=basins, output=csv_name)
|
#!/usr/bin/python
# Apple II Chat server program
import struct, socket, select, time
import xml.etree.ElementTree as ET
HOST = '' # Symbolic name meaning all available interfaces
PORT = 0x6502 # Apple II Chat non-privileged port
VERSION = 1
client_list = {}
chat_files = {}
chat_vers = []
def client_add(address, port, handle):
global client_list
client_list[address] = (port, handle)
print "Welcome, ", handle, "@", address, ":", port
def broadcast(handle, msg):
global client_list
if msg:
print handle, ": ", msg
bcastmsg = struct.pack('<HHHBc8p32p', 0x7EDA, VERSION, 0, 0xCA, 'C', handle, msg)
for c in client_list:
client = (c, client_list[c][0])
s.sendto(bcastmsg, client)
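# Packet layout used by this server (my reading of the struct format strings in
# this file; the field names are mine, kept here purely for reference):
#   '<HHHBc' header -> little-endian magic 0x7EDA (u16), protocol version (u16),
#                      sequence number (u16), hw byte 0xCA (u8), request type (char)
#   '8p' / '32p'    -> Pascal-style length-prefixed handle and message strings
#   '<17pBH'        -> filename (Pascal string), file type (u8), aux value (u16)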
def send_update(client, ver):
updatemask = 0
updatelist = []
for i in xrange(ver, VERSION):
updatemask |= chat_vers[i]
for f in chat_files:
if updatemask & chat_files[f][2]:
updatelist.append(f)
print "Update client version ", ver, " with:", updatelist
pkthdr = struct.pack('<HHHBc', 0x7EDA, VERSION, 0, 0xCA, 'U')
pktmsg = ""
for f in updatelist:
pktmsg += struct.pack('<17pBH', f, chat_files[f][0], chat_files[f][1])
pktmsg += struct.pack('B', 0)
s.sendto(pkthdr + pktmsg, client)
#
# Read version XML file
#
tree = ET.parse('chat-version.xml')
root = tree.getroot()
for chatfile in root.findall('{updates}file'):
fname = chatfile.get('name')
ftype = int(chatfile.get('type'), 0)
faux = int(chatfile.get('aux'), 0)
fmask = int(chatfile.get('mask'), 0)
chat_files[fname] = (ftype, faux, fmask)
for chatver in root.findall('{updates}version'):
chat_vers.insert(int(chatver.get('level')), int(chatver.get('updates'), 0))
chatver = root.find('{updates}current')
VERSION = int(chatver.get('level'))
print "CHAT server version:", VERSION
#
# Initialize UDP socket
#
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((HOST, PORT))
p = select.poll()
p.register(s.fileno(), select.POLLIN)
#
# Main server loop
#
while 1:
if p.poll(1000):
data, client = s.recvfrom(2048)
address, port = client
magic, ver, seq, hw, req = struct.unpack_from('<HHHBc', data)
if req == 'H':
handle, = struct.unpack_from('8p', data, 8)
            if ver != VERSION:
send_update(client, ver)
else:
client_add(address, port, handle)
s.sendto(struct.pack('<HHHBc', magic, ver, seq, 0xCA, 'W'), client)
broadcast(handle, "")
elif req == 'C':
try:
msg, = struct.unpack_from('32p', data, 8)
handle = client_list[address][1]
broadcast(handle, msg)
except:
s.sendto(struct.pack('<HHHBc', 0x7EDA, VERSION, 0, 0xCA, 'E'), client)
elif req == 'F':
try:
filename, fileblock = struct.unpack_from('<17pH', data, 8)
                f = open('clientfiles/'+filename, 'rb')
f.seek(fileblock * 1024)
msg = f.read(1024)
f.close()
s.sendto(struct.pack('<HHHBc', 0x7EDA, VERSION, seq, 0xCA, 'F') + msg, client)
except:
s.sendto(struct.pack('<HHHBc', 0x7EDA, VERSION, seq, 0xCA, 'E'), client)
else:
print "Unknown request: " + req
else:
pass
s.close()
|
'''
Created on Oct 30, 2015
@author: kashefy
'''
from nose.tools import assert_equal, assert_false, \
assert_list_equal, assert_true
from mock import patch, PropertyMock
import os
import tempfile
import shutil
import numpy as np
from numpy.testing import assert_array_equal
import h5py
import nideep.eval.inference as infr
import sys
CURRENT_MODULE_PATH = os.path.abspath(sys.modules[__name__].__file__)
ROOT_PKG_PATH = os.path.dirname(os.path.dirname(CURRENT_MODULE_PATH))
TEST_DATA_DIRNAME = 'test_data'
TEST_NET_FILENAME = 'n1.prototxt'
TEST_NET_HDF5DATA_FILENAME = 'n1h.prototxt'
class Bunch:
def __init__(self, **kwds):
self.__dict__.update(kwds)
class TestInference:
@patch('nideep.eval.inference.caffe.Net')
def test_forward(self, mock_net):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(3, 2)) for k in ['x', 'y', 'z']}
# mock methods and properties of Net objects
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
net = mock_net()
assert_false(net.forward.called, "Problem with mocked forward()")
out = infr.forward(net, ['x', 'z'])
assert_true(net.forward.called, "Problem with mocked forward()")
assert_list_equal(out.keys(), ['x', 'z'])
for k in ['x', 'z']:
assert_equal(out[k].shape, (3, 2),
msg="unexpected shape for blob %s" % k)
assert_array_equal(b[k].data, out[k])
# repeat with smaller set of keys
out = infr.forward(net, ['z'])
assert_list_equal(out.keys(), ['z'])
assert_equal(out['z'].shape, (3, 2), msg="unexpected shape for blob z")
assert_array_equal(b['z'].data, out['z'])
class TestInferenceEstNumFwdPasses():
@patch('nideep.iow.dataSource.DataSourceLMDB')
def test_est_num_fwd_passes_caffe_lmdb(self, mock_ds):
# we know the batch sizes from the prototxt file
fpath_net = os.path.join(ROOT_PKG_PATH, TEST_DATA_DIRNAME, TEST_NET_FILENAME)
mock_ds.return_value.num_entries.return_value = 77 * 64 # got batch size 64 from files directly
assert_equal(77, infr.est_min_num_fwd_passes(fpath_net, 'train'))
        mock_ds.return_value.num_entries.return_value = 33 * 100  # got batch size 100 from files directly
fpath_net = os.path.join(ROOT_PKG_PATH, TEST_DATA_DIRNAME, TEST_NET_FILENAME)
assert_equal(33, infr.est_min_num_fwd_passes(fpath_net, 'test'))
@patch('nideep.iow.dataSource.DataSourceH5List')
def test_est_num_fwd_passes_caffe_h5list(self, mock_ds):
# we know the batch sizes from the prototxt file
fpath_net = os.path.join(ROOT_PKG_PATH, TEST_DATA_DIRNAME, TEST_NET_HDF5DATA_FILENAME)
mock_ds.return_value.num_entries.return_value = 44 * 64 # got batch size 64 from files directly
assert_equal(44, infr.est_min_num_fwd_passes(fpath_net, 'train'))
        mock_ds.return_value.num_entries.return_value = 11 * 128  # got batch size 128 from files directly
fpath_net = os.path.join(ROOT_PKG_PATH, TEST_DATA_DIRNAME, TEST_NET_HDF5DATA_FILENAME)
assert_equal(11, infr.est_min_num_fwd_passes(fpath_net, 'test'))
class TestInferenceHDF5:
@classmethod
def setup_class(self):
self.dir_tmp = tempfile.mkdtemp()
@classmethod
def teardown_class(self):
shutil.rmtree(self.dir_tmp)
@patch('nideep.eval.inference.caffe.Net')
def test_infer_to_h5_fixed_dims(self, mock_net):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(1, 1 + idx, 3, 2 * (idx + 1))) for idx, k in enumerate(['x', 'y', 'z'])}
# mock methods and properties of Net objects
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
net = mock_net()
fpath = os.path.join(self.dir_tmp, 'test_infer_to_h5_fixed_dims.h5')
assert_false(os.path.isfile(fpath))
out = infr.infer_to_h5_fixed_dims(net, ['x', 'z'], 1, fpath)
assert_equal(net.forward.call_count, 1)
assert_true(os.path.isfile(fpath))
assert_list_equal(out, [1, 1])
# check db content
with h5py.File(fpath, "r") as f:
assert_list_equal([str(k) for k in f.keys()], ['x', 'z'])
for idx, k in enumerate(['x', 'y', 'z']):
if k == 'y':
assert_false(k in f, "Unexpected key found (%s)" % k)
else:
assert_equal(f[k].shape, (1, 1 + idx, 3, 2 * (idx + 1)),
msg="unexpected shape for blob %s" % k)
assert_array_equal(b[k].data, f[k])
@patch('nideep.eval.inference.caffe.Net')
def test_infer_to_h5_fixed_dims_n(self, mock_net):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(1, 1, 3, 2)) for k in ['x', 'y', 'z']}
# mock methods and properties of Net objects
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
for n in range(1, 10):
net = mock_net()
net.reset_mock()
fpath = os.path.join(self.dir_tmp, 'test_infer_to_h5_fixed_dims_n.h5')
out = infr.infer_to_h5_fixed_dims(net, ['x', 'z'], n, fpath)
assert_equal(net.forward.call_count, n)
assert_list_equal(out, [n, n])
@patch('nideep.eval.inference.caffe.Net')
def test_infer_to_h5_fixed_dims_preserve_batch_no(self, mock_net):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
# mock methods and properties of Net objects
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
net = mock_net()
fpath = os.path.join(self.dir_tmp, 'test_infer_to_h5_fixed_dims_preserve_batch_no.h5')
assert_false(os.path.isfile(fpath))
n = 3
out = infr.infer_to_h5_fixed_dims(net, ['x', 'z'], n, fpath,
preserve_batch=False)
assert_equal(net.forward.call_count, n)
assert_true(os.path.isfile(fpath))
assert_list_equal(out, [n * 4] * 2)
@patch('nideep.eval.inference.caffe.Net')
def test_infer_to_h5_fixed_dims_preserve_batch_yes(self, mock_net):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
# mock methods and properties of Net objects
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
net = mock_net()
fpath = os.path.join(self.dir_tmp, 'test_infer_to_h5_fixed_dims_preserve_batch_yes.h5')
assert_false(os.path.isfile(fpath))
n = 3
out = infr.infer_to_h5_fixed_dims(net, ['x', 'z'], n, fpath,
preserve_batch=True)
assert_equal(net.forward.call_count, n)
assert_true(os.path.isfile(fpath))
assert_list_equal(out, [n] * 2)
class TestInferenceLMDB:
@classmethod
def setup_class(self):
self.dir_tmp = tempfile.mkdtemp()
@classmethod
def teardown_class(self):
shutil.rmtree(self.dir_tmp)
@patch('nideep.eval.inference.caffe.Net')
def test_infer_to_lmdb_fixed_dims(self, mock_net):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(1, 1, 3, 2)) for k in ['x', 'y', 'z']}
# mock methods and properties of Net objects
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
net = mock_net()
dst_prefix = os.path.join(self.dir_tmp, 'test_infer_to_lmdb_fixed_dims_%s_lmdb')
for k in b.keys():
assert_false(os.path.isdir(dst_prefix % k))
out = infr.infer_to_lmdb(net, ['x', 'z'], 1, dst_prefix)
assert_equal(net.forward.call_count, 1)
assert_list_equal(out, [1, 1])
for k in b.keys():
if k in ['x', 'z']:
assert_true(os.path.isdir(dst_prefix % k))
else:
assert_false(os.path.isdir(dst_prefix % k))
@patch('nideep.eval.inference.caffe.Net')
def test_infer_to_lmdb_fixed_dims_n(self, mock_net):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(1, 1, 3, 2)) for k in ['x', 'y', 'z']}
# mock methods and properties of Net objects
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
for n in range(1, 10):
net = mock_net()
net.reset_mock()
dst_prefix = os.path.join(self.dir_tmp, 'test_infer_to_lmdb_fixed_dims_n_%s_lmdb')
out = infr.infer_to_lmdb(net, ['x', 'z'], n, dst_prefix)
assert_equal(net.forward.call_count, n)
assert_list_equal(out, [n, n])
@patch('nideep.eval.inference.caffe.Net')
def test_infer_to_lmdb_fixed_dims_preserve_batch_no(self, mock_net):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
# mock methods and properties of Net objects
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
net = mock_net()
dst_prefix = os.path.join(self.dir_tmp, 'test_infer_to_lmdb_fixed_dims_preserve_batch_no_%s_lmdb')
for k in b.keys():
assert_false(os.path.isdir(dst_prefix % k))
n = 3
out = infr.infer_to_lmdb(net, ['x', 'z'], n, dst_prefix)
assert_equal(net.forward.call_count, n)
assert_list_equal(out, [n * 4] * 2)
for k in b.keys():
if k in ['x', 'z']:
assert_true(os.path.isdir(dst_prefix % k))
else:
assert_false(os.path.isdir(dst_prefix % k))
@patch('nideep.eval.inference.caffe.Net')
def test_infer_to_lmdb_cur_multi_key(self, mock_net):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
# mock methods and properties of Net objects
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
net = mock_net()
dst_prefix = os.path.join(self.dir_tmp, 'test_infer_to_lmdb_cur_multi_key_%s_lmdb')
for k in b.keys():
assert_false(os.path.isdir(dst_prefix % k))
n = 3
out = infr.infer_to_lmdb_cur(net, ['x', 'z'], n, dst_prefix)
assert_equal(net.forward.call_count, n)
assert_list_equal(out, [n * 4] * 2)
for k in b.keys():
if k in ['x', 'z']:
assert_true(os.path.isdir(dst_prefix % k))
else:
assert_false(os.path.isdir(dst_prefix % k))
@patch('nideep.eval.inference.caffe.Net')
def test_infer_to_lmdb_cur_single_key(self, mock_net):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
# mock methods and properties of Net objects
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
net = mock_net()
dst_prefix = os.path.join(self.dir_tmp, 'test_infer_to_lmdb_cur_single_key_%s_lmdb')
for k in b.keys():
assert_false(os.path.isdir(dst_prefix % k))
n = 3
out = infr.infer_to_lmdb_cur(net, ['z'], n, dst_prefix)
assert_equal(net.forward.call_count, n)
assert_list_equal(out, [n * 4])
for k in b.keys():
if k in ['z']:
assert_true(os.path.isdir(dst_prefix % k))
else:
assert_false(os.path.isdir(dst_prefix % k))
@patch('nideep.eval.inference.est_min_num_fwd_passes')
@patch('nideep.eval.inference.caffe.Net')
def test_response_to_lmdb(self, mock_net, mock_num):
# fake minimal test data
b = {k : Bunch(data=np.random.rand(4, 1, 3, 2)) for k in ['x', 'y', 'z']}
# mock methods and properties of Net objects
mock_num.return_value = 3
mock_net.return_value.forward.return_value = np.zeros(1)
type(mock_net.return_value).blobs = PropertyMock(return_value=b)
net = mock_net()
dst_prefix = os.path.join(self.dir_tmp, 'test_response_to_lmdb_')
for m in ['train', 'test']:
for k in b.keys():
assert_false(os.path.isdir(dst_prefix + ('%s_' + m + '_lmdb') % k))
import nideep
out = nideep.eval.inference.response_to_lmdb("net.prototxt",
"w.caffemodel",
['x', 'z'],
dst_prefix)
assert_equal(net.forward.call_count, 3 * 2) # double for both modes
from caffe import TRAIN, TEST
assert_list_equal(out.keys(), [TRAIN, TEST])
assert_list_equal(out[TRAIN], [3 * 4] * 2)
assert_list_equal(out[TEST], [3 * 4] * 2)
for m in ['train', 'test']:
for k in b.keys():
if k in ['x', 'z']:
assert_true(os.path.isdir(dst_prefix + ('%s_' + m + '_lmdb') % k))
else:
assert_false(os.path.isdir(dst_prefix + ('%s_' + m + '_lmdb') % k))
|
import PIL
from PIL import Image
import math
def changeColorDepth(image, colorCount):
# taken from ufp.image
if image.mode == 'RGB' or image.mode == 'RGBA':
        ratio = 256 // colorCount  # integer step so the evaluated pixel values stay ints
change = lambda value: math.trunc(value/ratio)*ratio
return PIL.Image.eval(image, change)
else:
        raise ValueError('Error: Only supports RGB and RGBA images.')
depth = 4
img_name = input("Enter file name: ")
try:
img = changeColorDepth(Image.open(img_name), (depth**2))
except Exception as exc:
    print("Could not open", img_name, "-", exc, "Exiting.")
    exit()
print("Adjusting image colour depth...", end = " ")
result = img.convert('P', palette=Image.ADAPTIVE, colors=(2**depth))
print("Done")
print("Determining palette...", end=" ")
width, height = result.size
pixels = []
output_mem = ""
conv = result.convert("RGB")
for y in range(height):
for x in range(width):
r,g,b = conv.getpixel((x, y))
px_colour = (r,g,b)
if len(pixels) < (2**depth) and px_colour not in pixels:
pixels.append(px_colour)
output_mem += str(hex(pixels.index(px_colour)))[2].upper() + " "
output_mem += "\n"
print("Done")
hcs = ""
print("Palette:")
for colour in pixels:
col_str = ""
for val in colour:
v = str(hex(val))
if len(v) == 3:
col_str += "0"
else:
col_str += v[2].upper()
hcs += col_str + " "
print(hcs)
print("Writing .mem files...", end=" ")
pal_file = open("img_palette.mem", 'w')
pal_file.write(hcs)
pal_file.close()
img_file = open("img.mem", 'w')
img_file.write(output_mem)
img_file.close()
print("Done")
result.show()
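# Output format note (my summary of what the script above writes, not part of the
# original source): img_palette.mem holds up to 2**depth colours, each written as
# three hex nibbles "RGB" (the high nibble of each 8-bit channel) separated by
# spaces on one line; img.mem holds one hex digit (palette index) per pixel,
# space separated, with one image row per line.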
|
"""This class may be better inside dataClay [contrib] code.
However, I was having some issues regarding model registration and general
usability of the classes and splits. So that ended up here.
"""
import numpy as np
from dataclay import DataClayObject, dclayMethod
try:
from pycompss.api.task import task
from pycompss.api.parameter import IN
except ImportError:
from dataclay.contrib.dummy_pycompss import task, IN
class ItemIndexAwareSplit(DataClayObject):
"""Split that tracks the internal item index for each chunk.
@dclayImport numpy as np
@ClassField _chunks anything
@ClassField _idx anything
@ClassField _item_idx anything
@ClassField backend anything
# _coordinator is "volatile" --not persisted
"""
@dclayMethod(backend="anything", coordinator="anything")
def __init__(self, backend, coordinator):
"""Build a LocalIterator through a list of chunks.
:param chunks: Sequence of (iterable) chunks.
"""
# If this is not being called remotely, better to coerce to list right now
self._chunks = list()
self._idx = list()
self._item_idx = list()
self.backend = backend
# volatile
self._coordinator = coordinator
@dclayMethod(idx="anything", obj="anything")
def add_object(self, idx, obj):
self._chunks.append(obj)
self._idx.append(idx)
offset = self._coordinator.offset
newoffset = offset + len(obj)
self._coordinator.offset = newoffset
self._item_idx.append(range(offset, newoffset))
# Note that the return is not serializable, thus the _local flag
@dclayMethod(return_="anything", _local=True)
def __iter__(self):
return iter(self._chunks)
@dclayMethod(return_="list")
def get_indexes(self):
return self._idx
@dclayMethod(return_="numpy.nadarray")
def get_item_indexes(self):
return np.hstack(self._item_idx)
# Being local is not a technical requirement, but makes sense for
# performance reasons.
@dclayMethod(return_="anything", _local=True)
def enumerate(self):
return zip(self._idx, self._chunks)
@task(target_direction=IN, returns=object)
@dclayMethod(centers="anything", return_="anything")
def compute(self, centers):
subresults = list()
for frag in self._chunks:
subresults.append(frag.partial_sum(centers))
return subresults
|
"""High-level plotly interface.
This module contains functions for creating various graphical components such
as tables, vectors, 3d polygons, etc. It also contains functions for nicely
formatting numbers and equations. The module serves as a high-level interface
to the expansive plotly visualization package.
"""
__author__ = 'Henry Robbins'
__all__ = ['Figure', 'num_format', 'linear_string', 'equation_string',
'label', 'table', 'vector', 'scatter', 'line', 'equation',
'polygon', 'polytope', 'plot_tree']
from ._geometry import order, polytope_vertices, polytope_facets
import networkx as nx
import numpy as np
from plotly.basedatatypes import BaseTraceType
import plotly.graph_objects as plt
import plotly.io as pio
from plotly.subplots import make_subplots
from typing import List, Dict, Union
class Figure(plt.Figure):
"""Extension of the plotly Figure which maintains trace names.
    This class extends the plotly Figure class. It provides the ability to
    give trace(s) a name. A map from trace names to their indices is maintained
    so that traces can easily be accessed later. Furthermore, it overrides the
    show and write_html functions by passing configuration settings.
Attributes:
_trace_name_to_indices (Dict): Map of trace names to trace indices.
_axis_limits (List[float]): Axis limits of this figure.
"""
_config = dict(doubleClick=False,
displayModeBar=False,
editable=False,
responsive=False,
showAxisDragHandles=False,
showAxisRangeEntryBoxes=False)
"""Configuration settings to be used by show and write_html functions."""
def __init__(self, subplots: bool, *args, **kwargs):
"""Initialize the figure.
If subplots is true, the args and kwargs are passed to make_subplots
to generate a subplot; otherwise, the args and kwargs are passed to
the parent class plotly.graph_objects.Figure __init__ method.
Args:
subplots (bool): True if arguments are intended for make_subplots.
"""
if subplots:
self.__dict__.update(make_subplots(*args, **kwargs).__dict__)
else:
super(Figure, self).__init__(*args, **kwargs)
self.__dict__['_trace_name_to_indices'] = {}
self.__dict__['_axis_limits'] = None
def add_trace(self,
trace: BaseTraceType,
name: str = None,
row: int = None,
col: int = None):
"""Add a trace to the figure.
If no name argument is passed, there will be no name mapping to this
trace. It must be accessed by its index directly.
Args:
trace (BaseTraceType): A trace to be added to the figure.
name (str, optional): Name to reference the trace by.
row (int): Row to add this trace to.
col (int): Column to add this trace to.
"""
self.add_traces(traces=[trace], name=name, rows=row, cols=col)
def add_traces(self,
traces: List[BaseTraceType],
name: str = None, **kwargs):
"""Add traces to the figure.
If no name argument is passed, there will be no name mapping to these
traces. They must be accessed by their indices directly.
Args:
traces (List[BaseTraceType]): List of traces to add to the figure.
name (str, optional): Name to reference the traces by.
Raises:
ValueError: This trace name is already in use.
"""
if name is not None:
if name in self._trace_name_to_indices.keys():
raise ValueError('This trace name is already in use.')
n = len(self.data)
self._trace_name_to_indices[name] = list(range(n, n+len(traces)))
# Time trials revealed adding traces one at a time to be quicker than
# using the add_traces function.
for trace in traces:
super(Figure, self).add_traces(data=trace, **kwargs)
def get_indices(self, name: str, containing: bool = False) -> List[int]:
"""Return the list of trace indices with given name.
If containing is False, find trace indices whose trace name is exactly
as given; otherwise, find all trace indices whose trace name at least
contains the given name.
Args:
name (str): Name of traces to be accessed.
containing (bool): True if trace names containing name returned.
Returns:
List[int]: List of trace indices.
"""
if containing:
keys = [key for key in self._trace_name_to_indices if name in key]
indices = [self._trace_name_to_indices[key] for key in keys]
indices = [item for sublist in indices for item in sublist]
else:
indices = self._trace_name_to_indices[name]
return indices
def set_axis_limits(self, limits: List[float]):
"""Set axis limits and add extreme point to prevent rescaling.
Args:
limits (List[float]): The list of axis limits.
Raises:
ValueError: The list of axis limits is not length 2 or 3.
"""
n = len(limits)
if n not in [2,3]:
raise ValueError('The list of axis limits is not length 2 or 3.')
self._axis_limits = limits
if n == 2:
x_lim, y_lim = limits
pt = [np.array([[x_lim],[y_lim]])]
self.layout.xaxis1.range = [0, x_lim]
self.layout.yaxis1.range = [0, y_lim]
if n == 3:
x_lim, y_lim, z_lim = limits
pt = [np.array([[x_lim],[y_lim],[z_lim]])]
self.layout.scene1.xaxis.range = [0, x_lim]
self.layout.scene1.yaxis.range = [0, y_lim]
self.layout.scene1.zaxis.range = [0, z_lim]
self.add_trace(scatter(pt, visible=False))
def get_axis_limits(self) -> List[float]:
"""Return the list of axis limits.
Returns:
List[float]: List of axis limits.
"""
return self._axis_limits.copy()
def update_sliders(self, default: bool = False):
"""Update the sliders of this figure.
If a trace is added after a slider is created, the visibility of that
trace in the steps of the slider is not specified. This method sets
the visibility of these traces to False.
Args:
default (bool): Default visibility if unknown. Defaults to False.
"""
n = len(self.data)
for slider in self.layout.sliders:
for step in slider.steps:
tmp = list(step.args[0]['visible'])
step.args[0]['visible'] = tmp + [default]*(n-len(tmp))
def show(self, **kwargs):
"""Show the figure using default configuration settings."""
kwargs['config'] = Figure._config
plt.Figure.show(self, **kwargs)
def write_html(self, file: str, **kwargs):
""" Write a figure to an HTML file representation.
Args:
file (str): name of the file to write the HTML to."""
kwargs['config'] = Figure._config
pio.write_html(self, file, **kwargs)
def _ipython_display_(self):
"""Handle rich display of figures in ipython contexts."""
if pio.renderers.render_on_display and pio.renderers.default:
self.show()
else:
print(repr(self))
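# Minimal usage sketch for the named-trace bookkeeping above (illustrative only;
# the literal trace and name below are my own examples, not from this module):
#
#     fig = Figure(subplots=False)
#     fig.add_trace(plt.Scatter(x=[0, 1], y=[0, 1]), name='demo')
#     fig.get_indices('demo')                 # -> [0]
#     fig.get_indices('de', containing=True)  # -> [0]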
def num_format(num: Union[int,float], precision: int = 3) -> str:
"""Return a properly formated string for a number at some precision.
Formats a number to some precesion with trailing 0 and . removed.
Args:
num (Union[int,float]): Number to be formatted.
precision (int, optional): Precision to use. Defaults to 3.
Returns:
str: String representation of the number."""
return ('%.*f' % (precision, num)).rstrip('0').rstrip('.')
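# Illustrative values (my own examples; the first two assume the default precision of 3):
#   num_format(3.1400)  -> '3.14'
#   num_format(2.0)     -> '2'
#   num_format(1/3, 2)  -> '0.33'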
def linear_string(A: np.ndarray,
indices: List[int],
constant: float = None) -> str:
"""Return the string representation of a linear combination.
For A = [a1,..,an] and indices = [i1,..,in], returns the linear combination
a1 * x_(i1) + ... + an * x_(in) with a1,..,an formatted correctly. If a
constant b is provided, then returns b + a1 * x_(i1) + ... + an * x_(in).
Args:
        A (np.ndarray): List of coefficients for the linear combination.
indices (List[int]): List of indices of the x variables.
constant (float, optional): Constant of the linear combination.
Returns:
str: String representation of the linear combination.
"""
# This function returns the correct sign (+ or -) prefix for a number
def sign(num: float):
return {-1: ' - ', 0: ' + ', 1: ' + '}[np.sign(num)]
s = ''
if constant is not None:
s += num_format(constant)
for i in range(len(indices)):
if i == 0:
if constant is None:
s += num_format(A[0]) + 'x<sub>' + str(indices[0]) + '</sub>'
else:
s += (sign(A[0]) + num_format(abs(A[0])) + 'x<sub>'
+ str(indices[0]) + '</sub>')
else:
s += num_format(abs(A[i])) + 'x<sub>' + str(indices[i]) + '</sub>'
        if i != len(indices)-1:
s += sign(A[i+1])
return s
def equation_string(A: np.ndarray, b: float, rel: str = ' ≤ ') -> str:
"""Return the string representation of an equation.
For A = [a1,..,an], b, and rel returns the string form of the equation
a1 * x_(1) + ... + an * x_(n) rel b where rel represents some equality
symbol = or inequality symbol <, >, ≥, ≤, ≠.
Args:
        A (np.ndarray): Coefficients of the equation's LHS.
        b (float): Constant on the RHS of the equation.
        rel (str): Relation symbol: =, <, >, ≥, ≤, ≠.
Returns:
str: String representation of the equation.
"""
return linear_string(A, list(range(1, len(A) + 1))) + rel + num_format(b)
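# Worked example (mine, for illustration; the coefficients are assumptions):
#   equation_string(np.array([2, 0.5]), 4)
#   -> '2x<sub>1</sub> + 0.5x<sub>2</sub> ≤ 4'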
def label(dic: Dict[str, Union[float, list]]) -> str:
"""Return a styled string representation of the given dictionary.
Every key, value pair in the dictionary is on its own line with the key
name bolded followed by the formatted value it maps to.
Args:
dic (Dict[str, Union[float, list]]): Dictionary to create string for.
Returns:
str: String representation of the given dictionary.
"""
entries = []
for key in dic.keys():
s = '<b>' + key + '</b>: '
value = dic[key]
if type(value) is float:
s += num_format(value)
if type(value) is list:
s += '(%s)' % ', '.join(map(str, [num_format(i) for i in value]))
entries.append(s)
return '%s' % '<br>'.join(map(str, entries))
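# Worked example (mine, for illustration; the dictionary below is an assumption):
#   label({'x': [1.0, 2.5], 'obj': 3.0})
#   -> '<b>x</b>: (1, 2.5)<br><b>obj</b>: 3'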
def table(header: List[str],
content: List[str],
template: Dict = None,
**kwargs) -> plt.Table:
"""Return a table trace with given headers and content.
Note: keyword arguments given outside of template are given precedence.
Args:
header (List[str]): Column titles for the table.
content (List[str]): Content in each column of the table.
template (Dict): Dictionary of trace attributes. Defaults to None.
*kwargs: Arbitrary keyword arguments for plt.Table.
Returns:
plt.Table: A table trace with given headers and content.
"""
if template is None:
return plt.Table(header_values=header, cells_values=content)
else:
template = dict(template)
template.update(kwargs)
template['header']['values'] = header
template['cells']['values'] = content
return plt.Table(template)
def vector(tail: np.ndarray,
head: np.ndarray,
template: Dict = None,
**kwargs) -> Union[plt.Scatter, plt.Scatter3d]:
"""Return a 2d or 3d vector trace from tail to head.
Note: keyword arguments given outside of template are given precedence.
Args:
tail (np.ndarray): Point of the vector tail (in vector form).
head (np.ndarray): Point of the vector head (in vector form).
template (Dict): Dictionary of scatter attributes. Defaults to None.
*kwargs: Arbitrary keyword arguments for plt.Scatter or plt.Scatter3d.
"""
return scatter(x_list=[tail,head], template=template, **kwargs)
def scatter(x_list: List[np.ndarray],
template: Dict = None,
**kwargs) -> Union[plt.Scatter, plt.Scatter3d]:
"""Return a scatter trace for the given set of points.
Note: keyword arguments given outside of template are given precedence.
Args:
x_list (List[np.ndarray]): List of points in the form of vectors.
template (Dict): Dictionary of scatter attributes. Defaults to None.
*kwargs: Arbitrary keyword arguments for plt.Scatter or plt.Scatter3d.
Returns:
Union[plt.Scatter, plt.Scatter3d]: A scatter trace.
"""
pts = list(zip(*[list(x[:,0]) for x in x_list]))
pts = pts + [None]*(3 - len(pts))
x,y,z = pts
if template is None:
template = kwargs
else:
template = dict(template)
template.update(kwargs)
template['x'] = x
template['y'] = y
if z is None:
return plt.Scatter(template)
else:
template['z'] = z
return plt.Scatter3d(template)
def line(x_list: List[np.ndarray],
template: Dict = None,
**kwargs) -> plt.Scatter:
"""Return a scatter trace representing a 2d line.
Note: keyword arguments given outside of template are given precedence.
Args:
x_list (List[np.ndarray]): List of points in the form of vectors.
template (Dict): Dictionary of scatter attributes. Defaults to None.
*kwargs: Arbitrary keyword arguments for plt.Scatter.
Returns:
plt.Scatter: A scatter trace representing a 2d line.
"""
return scatter(x_list=x_list, template=template, **kwargs)
def equation(A: np.ndarray,
b: float,
domain: List[float],
template: Dict = None,
**kwargs) -> Union[plt.Scatter, plt.Scatter3d]:
"""Return a 2d or 3d trace representing the given equation.
Note: keyword arguments given outside of template are given precedence.
Args:
        A (np.ndarray): LHS coefficients of the equation.
        b (float): RHS coefficient of the equation.
domain (List[float]): Domain on which to plot this equation.
template (Dict): Dictionary of scatter attributes. Defaults to None.
*kwargs: Arbitrary keyword arguments for plt.Scatter or plt.Scatter3d.
Raises:
ValueError: Only supports equations in 2 or 3 variables.
ValueError: A must have a nonzero component.
Returns:
Union[plt.Scatter, plt.Scatter3d]: A trace representing the equation.
"""
n = len(A)
if n not in [2,3]:
raise ValueError('Only supports equations in 2 or 3 variables.')
if all(A == np.zeros(n)):
raise ValueError('A must have a nonzero component.')
if n == 2:
x_lim, y_lim = domain
# A[0]x + A[1]y = b
if A[1] != 0:
x = np.linspace(0,x_lim,2)
y = (b - A[0]*x)/A[1]
x_list = [np.array([[x[i]],[y[i]]]) for i in range(len(x))]
else:
x = b/A[0]
x_list = [np.array([[x],[0]]),np.array([[x],[y_lim]])]
return line(x_list=x_list, template=template, **kwargs)
if n == 3:
x_lim, y_lim, z_lim = domain
# A[0]x + A[1]y + A[2]z = b
x_list = []
if A[2] != 0:
for x in [0,x_lim]:
for y in [0,y_lim]:
z = (b - A[0]*x - A[1]*y)/A[2]
x_list.append(np.array([[x],[y],[z]]))
elif A[1] != 0:
for x in [0,x_lim]:
y = (b - A[0]*x)/A[1]
for z in [0,z_lim]:
x_list.append(np.array([[x],[y],[z]]))
else:
x = b/A[0]
for y in [0,y_lim]:
for z in [0,z_lim]:
x_list.append(np.array([[x],[y],[z]]))
return polygon(x_list=x_list, template=template, **kwargs)
def polygon(x_list: List[np.ndarray],
ordered: bool = False,
template: Dict = None,
**kwargs) -> Union[plt.Scatter, plt.Scatter3d]:
"""Return a 2d or 3d polygon trace defined by the given points.
Note: keyword arguments given outside of template are given precedence.
Args:
x_list (List[np.ndarray]): List of points in the form of vectors.
ordered (bool): True if given points are ordered. Defaults to False.
template (Dict): Dictionary of scatter attributes. Defaults to None.
*kwargs: Arbitrary keyword arguments for plt.Scatter or plt.Scatter3d.
Returns:
Union[plt.Scatter, plt.Scatter3d]: A 2d or 3d polygon trace.
Raises:
ValueError: The list of points was empty.
ValueError: The points are not 2 or 3 dimensional.
"""
if len(x_list) == 0:
raise ValueError("The list of points was empty.")
if len(x_list[0]) not in [2,3]:
raise ValueError("The points are not 2 or 3 dimensional.")
if len(x_list[0]) == 2:
if not ordered:
x,y = order(x_list)
else:
x_list.append(x_list[0])
x,y = zip(*[list(x[:,0]) for x in x_list])
z = None
else:
if not ordered:
x,y,z = order(x_list)
else:
x_list.append(x_list[0])
x,y,z = zip(*[list(x[:,0]) for x in x_list])
# When plotting a surface in Plotly, the surface is generated with
# respect to a chosen axis. If the surface is orthogonal to this
# axis, then the surface will not appear. This next step ensures
# that each polygon surface will properly display.
axis = 2 # default axis
if len(x) > 2:
# Get the normal vector of this polygon
v1 = [x[1] - x[0], y[1] - y[0], z[1] - z[0]]
v2 = [x[2] - x[0], y[2] - y[0], z[2] - z[0]]
n = np.round(np.cross(v1,v2), 7)
for ax in range(3):
if not np.dot(n,[1 if i == ax else 0 for i in range(3)]) == 0:
axis = ax
if template is None:
template = kwargs
else:
template = dict(template)
template.update(kwargs)
if z is None:
x_list = [np.array([i]).transpose() for i in zip(x,y)]
return scatter(x_list=x_list, template=template)
else:
x_list = [np.array([i]).transpose() for i in zip(x,y,z)]
template['surfaceaxis'] = axis
return scatter(x_list=x_list, template=template)
def polytope(A: np.ndarray,
b: np.ndarray,
vertices: List[np.ndarray] = None,
template: Dict = None,
**kwargs) -> List[Union[plt.Scatter, plt.Scatter3d]]:
"""Return a 2d or 3d polytope defined by the list of halfspaces Ax <= b.
Returns a plt.Scatter polygon in the case of a 2d polytope and returns a
list of plt.Scatter3d polygons in the case of a 3d polytope. The vertices
of the halfspace intersection can be provided to improve computation time.
Note: keyword arguments given outside of template are given precedence.
Args:
        A (np.ndarray): LHS coefficients of the halfspaces.
        b (np.ndarray): RHS coefficients of the halfspaces.
vertices (List[np.ndarray]): Vertices of the halfspace intersection.
template (Dict): Dictionary of scatter attributes. Defaults to None.
*kwargs: Arbitrary keyword arguments for plt.Scatter or plt.Scatter3d.
Returns:
List[Union[plt.Scatter, plt.Scatter3d]]: 2d or 3d polytope.
"""
if vertices is None:
vertices = polytope_vertices(A,b)
if A.shape[1] == 2:
return [polygon(x_list=vertices,
template=template,
**kwargs)]
if A.shape[1] == 3:
facets = polytope_facets(A, b, vertices=vertices)
polygons = []
for facet in facets:
if len(facet) > 0:
polygons.append(polygon(x_list=facet,
template=template,
**kwargs))
return polygons
def tree_positions(T:nx.classes.graph.Graph,
root:Union[str,int]) -> Dict[int, List[float]]:
"""Get positions for every node in the tree T with the given root.
Args:
T (nx.classes.graph.Graph): Tree graph.
root (Union[str,int]): Root of the tree graph
Returns:
Dict[int, List[float]]: Dictionary from nodes in T to positions.
"""
PAD = 0.1
HORIZONTAL_SPACE = 0.2
position = {}
position[root] = (0.5, 1-PAD) # root position
node_to_level = nx.single_source_shortest_path_length(T, root)
level_count = max(node_to_level.values()) + 1
levels = {}
for l in range(level_count):
levels[l] = [i for i in node_to_level if node_to_level[i] == l]
level_heights = np.linspace(1.1, -0.1, level_count + 2)[1:-1]
for l in range(1, level_count):
        # If there are more than 4 nodes in a level, spread them evenly across the width;
# otherwise, try to put nodes under their parent.
if len(levels[l]) <= 4:
# get parents of every pair of children in the level
children = {}
for node in levels[l]:
parent = [i for i in list(T.neighbors(node)) if i < node][0]
if parent in children:
children[parent].append(node)
else:
children[parent] = [node]
# initial attempt at positioning
pos = {}
for parent in children:
x = position[parent][0]
d = max((1/2)**(l+1), HORIZONTAL_SPACE / 2)
pos[children[parent][0]] = [x-d, level_heights[l]]
pos[children[parent][1]] = [x+d, level_heights[l]]
# perturb if needed
keys = list(pos.keys())
x = [p[0] for p in pos.values()]
n = len(x) - 1
while any([x[i+1]-x[i]+0.05 < HORIZONTAL_SPACE for i in range(n)]):
for i in range(len(x)-1):
if abs(x[i+1] - x[i]) < HORIZONTAL_SPACE:
shift = (HORIZONTAL_SPACE - abs(x[i+1] - x[i]))/2
x[i] -= shift
x[i+1] += shift
# shift to be within width
x[0] = x[0] + (max(PAD - x[0], 0))
for i in range(1,len(x)):
x[i] = x[i] + max(HORIZONTAL_SPACE - (x[i] - x[i-1]), 0)
x[-1] = x[-1] - (max(x[-1] - (1-PAD), 0))
for i in reversed(range(len(x)-1)):
x[i] = x[i] - max(HORIZONTAL_SPACE - (x[i+1] - x[i]), 0)
# update the position dictionary with new x values
for i in range(len(x)):
pos[keys[i]][0] = x[i]
# set position
for node in pos:
position[node] = pos[node]
else:
level_widths = np.linspace(-0.1, 1.1, len(levels[l]) + 2)[1:-1]
for j in range(len(levels[l])):
                position[(levels[l][j])] = (level_widths[j], level_heights[l])
return position
def plot_tree(fig:Figure,
T:nx.classes.graph.Graph,
root:Union[str,int],
row:int = 1,
col:int = 2):
"""Plot the tree on the figure.
This function assumes the type of subplot at the given row and col is of
type scatter plot and has both x and y range of [0,1].
Args:
fig (Figure): The figure to which the tree should be plotted.
T (nx.classes.graph.Graph): Tree to be plotted.
root (Union[str,int]): Root node of the tree.
row (int, optional): Subplot row of the figure. Defaults to 1.
col (int, optional): Subplot col of the figure. Defaults to 2.
"""
nx.set_node_attributes(T, tree_positions(T, root), 'pos')
edge_x = []
edge_y = []
for edge in T.edges():
x0, y0 = T.nodes[edge[0]]['pos']
x1, y1 = T.nodes[edge[1]]['pos']
edge_x += [x0, x1, None]
edge_y += [y0, y1, None]
edge_trace = plt.Scatter(x=edge_x, y=edge_y,
line=dict(width=1, color='black'),
hoverinfo='none', showlegend=False, mode='lines')
fig.add_trace(trace=edge_trace, row=row, col=col)
for node in T.nodes():
if 'text' in T.nodes[node]:
text = T.nodes[node]['text']
else:
text = node
if 'template' in T.nodes[node]:
template = T.nodes[node]['template']
else:
template = 'unexplored'
x,y = T.nodes[node]['pos']
fig.add_annotation(x=x, y=y, visible=True, text=text,
templateitemname=template, row=row, col=col)
|
from setuptools import setup
setup(
name='setup_case_py',
version='1',
packages=[''],
url='',
license='',
author='sarambl',
author_email='s.m.blichner@geo.uio.no',
description=''
)
|
import pylab
import numpy as np
import re
data = [i.split() for i in open('cluster_members.csv').readlines()]
print [len(i) for i in data]
nClusters = len(data)
nCols = 3
nRows = (nClusters+(nCols-1))/nCols
print nClusters, nRows, nCols
def getGroupPrefix(runPrefix):
groupPrefix = runPrefix.split('run')[0].strip('_')
return groupPrefix
groupPrefix2prefixes2frames = {}
prefix2frames = {}
for cluster in data:
for member in cluster:
result = re.findall('([a-zA-Z0-9_-]+)_([0-9]+)', member)
#print member, result
prefix = result[0][0].strip('_')
groupPrefix = getGroupPrefix(prefix)
if not groupPrefix in groupPrefix2prefixes2frames.keys():
groupPrefix2prefixes2frames[groupPrefix] = {}
groupPrefix2prefixes2frames[groupPrefix][prefix] = []
frame = int(result[0][1])
prefix2frames[prefix] = prefix2frames.get(prefix,[]) + [frame]
groupPrefix2prefixes2frames[groupPrefix][prefix] += [frame]
#print prefix2frames
prefixes = prefix2frames.keys()
prefixes.sort()
nPrefixes = len(prefixes)
groupPrefixes = list(set([getGroupPrefix(i) for i in prefixes]))
groupPrefixes.sort()
nGroupPrefixes = len(groupPrefixes)
groupPrefix2row = dict([(groupPrefix, index) for index, groupPrefix in enumerate(groupPrefixes)])
#prefix2row = dict([(prefix, index) for index, prefix in enumerate(prefixes)])
prefix2row = dict([(prefix, groupPrefix2row[getGroupPrefix(prefix)]) for prefix in prefixes])
#frames = {}
prefixAndFrame2Col = {}
groupPrefixColsTaken = {}
for prefix in prefixes:
groupPrefix = getGroupPrefix(prefix)
frames = prefix2frames[prefix]
frames.sort()
#if prefix in prefixAndFrame2Col.keys():
if groupPrefix in groupPrefixColsTaken.keys():
#Note that frame numbering starts at 1 so off-by-1 won't be a problem here
offset = max(groupPrefixColsTaken[groupPrefix])
else:
offset = 0
prefixAndFrame2Col[prefix] = dict([(frame, index+offset) for index, frame in enumerate(frames)])
groupPrefixColsTaken[groupPrefix] = groupPrefixColsTaken.get(groupPrefix,[])+[i+offset+1 for i, frame in enumerate(frames)]
#memberships = np.zeros((nClusters, nPrefixes, max([len(i) for i in prefix2frames.values()])))
#memberships = np.zeros((nClusters, nGroupPrefixes, max([len(i) for i in prefix2frames.values()])))
memberships = np.zeros((nClusters, nGroupPrefixes, max([max(i) for i in groupPrefixColsTaken.values()])))
print [(groupPrefix, max(i)) for groupPrefix, i in zip(groupPrefixColsTaken.keys(), groupPrefixColsTaken.values())]
#1/0
memberships -= 1
for clusterIndex, cluster in enumerate(data):
for member in cluster:
result = re.findall('([a-zA-Z0-9_-]+)_([0-9]+)', member)
prefix = result[0][0].strip('_')
frame = int(result[0][1])
row = prefix2row[prefix]
col = prefixAndFrame2Col[prefix][frame]
#print member
if sum(memberships[:,row,col]) > 0:
print memberships[:,row,col]
1/0
memberships[:,row,col] = 0
memberships[clusterIndex,row,col] = 1
for clusterIndex, cluster in enumerate(data):
pylab.subplot(nRows, nCols, clusterIndex+1)
pylab.imshow(memberships[clusterIndex,:,:],
interpolation='nearest',
vmin=-1, vmax=1,
aspect='auto')
#pylab.yticks(range(len(prefixes)), [[]*4+[prefix] for i, prefix in enumerate(prefixes) if i%5==0])
#pylab.yticks(range(0,len(prefixes),1), prefixes )
pylab.yticks(range(0,len(groupPrefixes),1), groupPrefixes )
pylab.xlabel('Frame')
pylab.title('Cluster %i' %(clusterIndex))
pylab.subplots_adjust(left=0.06,
right=0.99,
bottom=0.025,
top=0.975,
hspace=0.250)
pylab.show()
#Analyze inter-cluster transitions
#nSimulations = len(prefixes)
nSimulations = memberships.shape[0]
nCols = 3
# First +1 because the first plot is of all the simulations summed up
nRows = (nSimulations+1+(nCols-1))/nCols
allTransitions = np.zeros((nClusters, nClusters))
#transitions = dict(((prefix,np.zeros((nClusters, nClusters))) for prefix in prefixes))
transitions = dict(((groupPrefix,np.zeros((nClusters, nClusters))) for groupPrefix in groupPrefixes))
for simulationIndex, groupPrefix in enumerate(groupPrefixes):
simulationIndex = groupPrefixes.index(groupPrefix)
for prefix in groupPrefix2prefixes2frames[groupPrefix].keys():
currentCluster = None
sliceStartCol = min(prefixAndFrame2Col[prefix].values())
sliceEndCol = max(prefixAndFrame2Col[prefix].values())
#simSlice = memberships[:,simulationIndex,:]
#for frameIndex in range(simSlice.shape[1]):
for frameIndex in range(sliceStartCol, sliceEndCol+1):
nonzeros = np.nonzero(memberships[:,simulationIndex,frameIndex]==1)
if len(nonzeros[0]) == 0:
nextCluster = None
elif len(nonzeros[0]) > 1:
print nextCluster
1/0
else:
nextCluster = nonzeros[0][0]
#if not(currentCluster is None) and (nextCluster is None):
# print prefix, 'CurrentCluster is %r and nextCluster is %r' %(currentCluster, nextCluster)
if not(currentCluster is None) and not(nextCluster is None):
allTransitions[currentCluster, nextCluster] += 1
transitions[groupPrefix][currentCluster, nextCluster] += 1
#if currentCluster == 7:
# print prefix, 'currentCluster is 7 and nextCluster is %r' %(nextCluster)
currentCluster = nextCluster
#print nextCluster
#pylab.subplot(nRows, nCols, simulationIndex+1)
#pylab.imshow(np.log10(transitions[prefix]),
# interpolation='nearest')
#pylab.colorbar()
#pylab.show()
# Network diagram of cluster transitions
import networkx as nx
G=nx.Graph()
#nonzeros = np.nonzero(allTransitions)
#print nonzeros
#for row, col in zip(nonzeros[0], nonzeros[1]):
nCols = 5
nRows = ((nGroupPrefixes + (nCols-1)) / nCols)
#pylab.subplot(nRows, nCols, 1)
pylab.subplot(1, 1, 1)
for row in range(nClusters):
for col in range(nClusters):
G.add_edge(row,col,weight=allTransitions[row,col]+allTransitions[col,row])
elarge=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] >5]
emed=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] <=5 and d['weight'] > 2.5]
esmall=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] <=2.5 and d['weight'] > 0]
#pos = nx.spring_layout(G, k=2./np.sqrt(nClusters))
pos = nx.spring_layout(G, k=3./np.sqrt(nClusters))
#nodes
nFramesPerClusterAllSims = [np.sum(memberships[clusterIndex,:,:]==1) for clusterIndex in range(nClusters)]
nodeSizes = [150. * float(i)/(np.mean(nFramesPerClusterAllSims)) for i in nFramesPerClusterAllSims]
nx.draw_networkx_nodes(G, pos, node_size=nodeSizes)
# edges
nx.draw_networkx_edges(G,pos,edgelist=elarge,
width=6)
nx.draw_networkx_edges(G,pos,edgelist=emed,
width=6,alpha=1.,edge_color='b',style='dashed')
nx.draw_networkx_edges(G,pos,edgelist=esmall,
width=6,alpha=0.3,edge_color='b',style='dashed')
# labels
nx.draw_networkx_labels(G,pos,font_size=20,font_family='sans-serif')
pylab.title('All')
pylab.xticks([],[])
pylab.yticks([],[])
pylab.show()
for simulationIndex, groupPrefix in enumerate(groupPrefixes):
pylab.subplot(nRows, nCols, simulationIndex+1)
G=nx.Graph()
#nonzeros = np.nonzero(transitions[prefix])
#for row, col in zip(nonzeros[0], nonzeros[1]):
for row in range(nClusters):
for col in range(nClusters):
G.add_edge(row,col,weight=transitions[groupPrefix][row,col]+transitions[groupPrefix][col,row])
elarge=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] >5]
emed=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] <=5 and d['weight'] > 2.5]
esmall=[(u,v) for (u,v,d) in G.edges(data=True) if d['weight'] <=2.5 and d['weight'] > 0]
#pos = nx.spring_layout(G)
# nodes
nFramesPerClusterThisSim = [np.sum(memberships[clusterIndex,simulationIndex,:]==1) for clusterIndex in range(nClusters)]
print nFramesPerClusterThisSim
nodeSizes = [150. * float(i)/(np.mean(nFramesPerClusterAllSims)/nGroupPrefixes) for i in nFramesPerClusterThisSim]
print nodeSizes
nx.draw_networkx_nodes(G, pos, node_size=nodeSizes)
# edges
nx.draw_networkx_edges(G,pos,edgelist=elarge,
width=6)
nx.draw_networkx_edges(G,pos,edgelist=emed,
width=6,alpha=1.,edge_color='b',style='dashed')
nx.draw_networkx_edges(G,pos,edgelist=esmall,
width=6,alpha=0.3,edge_color='b',style='dashed')
# labels
nx.draw_networkx_labels(G,pos,font_size=20,font_family='sans-serif')
pylab.xlim([-0.2, 1.2])
pylab.ylim([-0.2, 1.2])
pylab.title(groupPrefix)
pylab.xticks([],[])
pylab.yticks([],[])
pylab.show()
'''
#Show cluster representatives
showFirstNClusters = 6
nAngles = 1
nCols = 4
nRows = (showFirstNClusters / 2) + 1
import matplotlib.image as mpimg
import os
#for clusterNum in range(nClusters):
for clusterNum in range(showFirstNClusters):
thisRow = (clusterNum / 2) + 1
thisCol = ((nAngles+1) * (clusterNum % 2))+1
pylab.subplot(nRows,nCols, ((thisRow-1)*nCols)+thisCol)
pylab.text(0,0,'Cluster %i'%(clusterNum), fontsize=30)
#for angle in range(1,5):
for angle in range(1,nAngles+1):
if not(os.path.exists('clusterPics/cluster%i_render%i.png')):
os.system('convert clusterPics/cluster%i_render%i.tga clusterPics/cluster%i_render%i.png' %(clusterNum, angle, clusterNum, angle))
img = mpimg.imread('clusterPics/cluster%i_render%i.png' %(clusterNum, angle))
thisCol = ((nAngles+1) * (clusterNum % 2)) + angle + 1
pylab.subplot(nRows, nCols,((thisRow-1)*nCols)+thisCol)
pylab.imshow(img)
pylab.xticks([],[])
pylab.yticks([],[])
pylab.show()
'''
|
from __future__ import print_function
import os
import shlex, shutil, getpass
#import subprocess
import FWCore.ParameterSet.Config as cms
process = cms.Process("SiPixelInclusiveBuilder")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.MessageLogger.cerr.enable = False
process.MessageLogger.cout = dict(enable = True, threshold = "WARNING")
process.load("Configuration.StandardSequences.MagneticField_cff")
#hptopo
#process.load("Configuration.StandardSequences.GeometryIdeal_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
from Configuration.AlCa.autoCond import autoCond
process.GlobalTag.globaltag = autoCond['run2_design']
print(process.GlobalTag.globaltag)
process.load("Configuration.StandardSequences.GeometryDB_cff")
#process.load("Geometry.CMSCommonData.cmsIdealGeometryXML_cfi")
#process.load("CalibTracker.Configuration.TrackerAlignment.TrackerAlignment_Fake_cff")
#process.load("Geometry.TrackerGeometryBuilder.trackerGeometry_cfi")
#process.load("Geometry.TrackerNumberingBuilder.trackerNumberingGeometry_cfi")
process.load("CondTools.SiPixel.SiPixelGainCalibrationService_cfi")
process.load("CondCore.CondDB.CondDB_cfi")
process.source = cms.Source("EmptyIOVSource",
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
timetype = cms.string('runnumber'),
interval = cms.uint64(1)
)
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
#to get the user running the process
user = getpass.getuser()
#try:
# user = os.environ["USER"]
#except KeyError:
# user = subprocess.call('whoami')
# # user = commands.getoutput('whoami')
#file = "/tmp/" + user + "/SiPixelLorentzAngle.db"
file = "siPixelLorentzAngle.db"
sqlfile = "sqlite_file:" + file
print('\n-> Uploading as user %s into file %s, i.e. %s\n' % (user, file, sqlfile))
#standard python libraries instead of spawn processes
if(os.path.isfile('./'+file)):
shutil.move("siPixelLorentzAngle.db", "siPixelLorentzAngle_old.db")
#subprocess.call(["/bin/cp", "siPixelLorentzAngle.db", file])
#subprocess.call(["/bin/mv", "siPixelLorentzAngle.db", "siPixelLorentzAngle.db"])
##### DATABASE CONNECTION AND INPUT TAGS ######
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
BlobStreamerName = cms.untracked.string('TBufferBlobStreamingService'),
DBParameters = cms.PSet(
authenticationPath = cms.untracked.string('.'),
connectionRetrialPeriod = cms.untracked.int32(10),
idleConnectionCleanupPeriod = cms.untracked.int32(10),
messageLevel = cms.untracked.int32(1),
enablePoolAutomaticCleanUp = cms.untracked.bool(False),
enableConnectionSharing = cms.untracked.bool(True),
connectionRetrialTimeOut = cms.untracked.int32(60),
connectionTimeOut = cms.untracked.int32(0),
enableReadOnlySessionOnUpdateConnection = cms.untracked.bool(False)
),
timetype = cms.untracked.string('runnumber'),
connect = cms.string(sqlfile),
toPut = cms.VPSet(
cms.PSet(
record = cms.string('SiPixelLorentzAngleRcd'),
tag = cms.string('SiPixelLorentzAngle_2015_v2')
#tag = cms.string('SiPixelLorentzAngle_v1')
),
### cms.PSet(
### record = cms.string('SiPixelLorentzAngleSimRcd'),
### tag = cms.string('SiPixelLorentzAngleSim_v1')
### ),
)
)
###### LORENTZ ANGLE OBJECT ######
process.SiPixelLorentzAngle = cms.EDAnalyzer("SiPixelLorentzAngleDB",
magneticField = cms.double(3.8),
#in case of PSet
BPixParameters = cms.untracked.VPSet(
cms.PSet(
layer = cms.uint32(1),
module = cms.uint32(1),
angle = cms.double(0.0862)
),
cms.PSet(
layer = cms.uint32(1),
module = cms.uint32(2),
angle = cms.double(0.0862)
),
cms.PSet(
layer = cms.uint32(1),
module = cms.uint32(3),
angle = cms.double(0.0862)
),
cms.PSet(
layer = cms.uint32(1),
module = cms.uint32(4),
angle = cms.double(0.0862)
),
cms.PSet(
layer = cms.uint32(1),
module = cms.uint32(5),
angle = cms.double(0.0883)
),
cms.PSet(
layer = cms.uint32(1),
module = cms.uint32(6),
angle = cms.double(0.0883)
),
cms.PSet(
layer = cms.uint32(1),
module = cms.uint32(7),
angle = cms.double(0.0883)
),
cms.PSet(
layer = cms.uint32(1),
module = cms.uint32(8),
angle = cms.double(0.0883)
),
cms.PSet(
layer = cms.uint32(2),
module = cms.uint32(1),
angle = cms.double(0.0848)
),
cms.PSet(
layer = cms.uint32(2),
module = cms.uint32(2),
angle = cms.double(0.0848)
),
cms.PSet(
layer = cms.uint32(2),
module = cms.uint32(3),
angle = cms.double(0.0848)
),
cms.PSet(
layer = cms.uint32(2),
module = cms.uint32(4),
angle = cms.double(0.0848)
),
cms.PSet(
layer = cms.uint32(2),
module = cms.uint32(5),
angle = cms.double(0.0892)
),
cms.PSet(
layer = cms.uint32(2),
module = cms.uint32(6),
angle = cms.double(0.0892)
),
cms.PSet(
layer = cms.uint32(2),
module = cms.uint32(7),
angle = cms.double(0.0892)
),
cms.PSet(
layer = cms.uint32(2),
module = cms.uint32(8),
angle = cms.double(0.0892)
),
cms.PSet(
layer = cms.uint32(3),
module = cms.uint32(1),
angle = cms.double(0.0851)
),
cms.PSet(
layer = cms.uint32(3),
module = cms.uint32(2),
angle = cms.double(0.0851)
),
cms.PSet(
layer = cms.uint32(3),
module = cms.uint32(3),
angle = cms.double(0.0851)
),
cms.PSet(
layer = cms.uint32(3),
module = cms.uint32(4),
angle = cms.double(0.0851)
),
cms.PSet(
layer = cms.uint32(3),
module = cms.uint32(5),
angle = cms.double(0.0877)
),
cms.PSet(
layer = cms.uint32(3),
module = cms.uint32(6),
angle = cms.double(0.0877)
),
cms.PSet(
layer = cms.uint32(3),
module = cms.uint32(7),
angle = cms.double(0.0877)
),
cms.PSet(
layer = cms.uint32(3),
module = cms.uint32(8),
angle = cms.double(0.0877)
),
),
FPixParameters = cms.untracked.VPSet(
cms.PSet(
side = cms.uint32(1),
disk = cms.uint32(1),
HVgroup = cms.uint32(1),
angle = cms.double(0.0714)
),
cms.PSet(
side = cms.uint32(1),
disk = cms.uint32(2),
HVgroup = cms.uint32(1),
angle = cms.double(0.0714)
),
cms.PSet(
side = cms.uint32(2),
disk = cms.uint32(1),
HVgroup = cms.uint32(1),
angle = cms.double(0.0713)
),
cms.PSet(
side = cms.uint32(2),
disk = cms.uint32(2),
HVgroup = cms.uint32(1),
angle = cms.double(0.0713)
),
cms.PSet(
side = cms.uint32(1),
disk = cms.uint32(1),
HVgroup = cms.uint32(2),
angle = cms.double(0.0643)
),
cms.PSet(
side = cms.uint32(1),
disk = cms.uint32(2),
HVgroup = cms.uint32(2),
angle = cms.double(0.0643)
),
cms.PSet(
side = cms.uint32(2),
disk = cms.uint32(1),
HVgroup = cms.uint32(2),
angle = cms.double(0.0643)
),
cms.PSet(
side = cms.uint32(2),
disk = cms.uint32(2),
HVgroup = cms.uint32(2),
angle = cms.double(0.0643)
),
),
ModuleParameters = cms.untracked.VPSet(
cms.PSet(
rawid = cms.uint32(302056472),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302056476),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302056212),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302055700),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302055708),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302060308),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302060312),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302059800),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302059548),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302123040),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302122772),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302122776),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302122516),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302122264),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302122272),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302122008),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302121752),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302121496),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302121240),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302121244),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302128920),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302128924),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302129176),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302129180),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302129184),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302128404),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302128408),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302189088),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302188820),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302188832),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302188052),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302187552),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302197784),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302197532),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302197536),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302197016),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302196244),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302195232),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302188824),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302186772),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302186784),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302121992),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302188552),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302187280),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302186768),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302186764),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302186756),
angle = cms.double(0.0955)
),
cms.PSet(
rawid = cms.uint32(302197516),
angle = cms.double(0.0955)
),
),
#in case lorentz angle values for bpix should be read from file -> not implemented yet
useFile = cms.bool(False),
record = cms.untracked.string('SiPixelLorentzAngleRcd'),
fileName = cms.string('lorentzFit.txt')
)
process.SiPixelLorentzAngleSim = cms.EDAnalyzer("SiPixelLorentzAngleDB",
magneticField = cms.double(3.8),
#in case lorentz angle values for bpix should be read from file -> not implemented yet
useFile = cms.bool(False),
record = cms.untracked.string('SiPixelLorentzAngleSimRcd'),
fileName = cms.string('lorentzFit.txt')
)
process.p = cms.Path(
# process.SiPixelLorentzAngleSim*
process.SiPixelLorentzAngle
)
|
#!/usr/bin/python3
"""
# Author: Scott Chubb scott.chubb@netapp.com
# Written for Python 3.7 and above
# No warranty is offered, use at your own risk. While these scripts have been
# tested in lab situations, all use cases cannot be accounted for.
"""
def list_cluster_details_payload():
payload = ({"method": "ListClusterDetails",
"params": {},
"id": 1})
return payload
def list_active_nodes_payload(cls_id):
payload = ({"method": "ListActiveNodes",
"params": {
"clusterID": cls_id
},
"id": 1})
return payload
def get_cluster_info_payload(cls_id):
payload = ({"method": "GetClusterInfo",
"params": {
"clusterID": cls_id
},
"id": 1})
return payload
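# Illustrative usage sketch (not part of the original module). Assumptions: the payloads
# above are POSTed to a SolidFire-style JSON-RPC endpoint with the `requests` library;
# `mvip`, `user`, and `password` are placeholders supplied by the caller, and the endpoint
# path/API version below are an assumption, not documented behaviour of this module.
def example_send_list_cluster_details(mvip, user, password):
    import requests
    url = "https://{0}/json-rpc/12.3".format(mvip)  # assumed endpoint path and API version
    response = requests.post(url, json=list_cluster_details_payload(),
                             auth=(user, password), verify=False)
    return response.json()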
def main():
"""
Nothing here as this is a module
"""
print(f"This is a support module and has no output of its own")
if __name__ == "__main__":
main()
|
def _apply_entities(text, entities, escape_map, format_map):
def inside_entities(i):
return any(map(lambda e:
e['offset'] <= i < e['offset']+e['length'],
entities))
# Split string into char sequence and escape in-place to
# preserve index positions.
seq = list(map(lambda c,i:
escape_map[c] # escape special characters
if c in escape_map and not inside_entities(i)
else c,
list(text), # split string to char sequence
range(0,len(text)))) # along with each char's index
# Ensure smaller offsets come first
sorted_entities = sorted(entities, key=lambda e: e['offset'])
offset = 0
result = ''
for e in sorted_entities:
f,n,t = e['offset'], e['length'], e['type']
result += ''.join(seq[offset:f])
if t in format_map:
# apply format
result += format_map[t](''.join(seq[f:f+n]), e)
else:
result += ''.join(seq[f:f+n])
offset = f + n
result += ''.join(seq[offset:])
return result
def apply_entities_as_markdown(text, entities):
"""
Format text as Markdown. Also take care of escaping special characters.
Returned value can be passed to :meth:`.Bot.sendMessage` with appropriate
``parse_mode``.
:param text:
plain text
:param entities:
a list of `MessageEntity <https://core.telegram.org/bots/api#messageentity>`_ objects
"""
escapes = {'*': '\\*',
'_': '\\_',
'[': '\\[',
'`': '\\`',}
formatters = {'bold': lambda s,e: '*'+s+'*',
'italic': lambda s,e: '_'+s+'_',
'text_link': lambda s,e: '['+s+']('+e['url']+')',
'text_mention': lambda s,e: '['+s+'](tg://user?id='+str(e['user']['id'])+')',
'code': lambda s,e: '`'+s+'`',
'pre': lambda s,e: '```text\n'+s+'```'}
return _apply_entities(text, entities, escapes, formatters)
def apply_entities_as_html(text, entities):
"""
Format text as HTML. Also take care of escaping special characters.
Returned value can be passed to :meth:`.Bot.sendMessage` with appropriate
``parse_mode``.
:param text:
plain text
:param entities:
a list of `MessageEntity <https://core.telegram.org/bots/api#messageentity>`_ objects
"""
escapes = {'<': '<',
'>': '>',
'&': '&',}
formatters = {'bold': lambda s,e: '<b>'+s+'</b>',
'italic': lambda s,e: '<i>'+s+'</i>',
'text_link': lambda s,e: '<a href="'+e['url']+'">'+s+'</a>',
'text_mention': lambda s,e: '<a href="tg://user?id='+str(e['user']['id'])+'">'+s+'</a>',
'code': lambda s,e: '<code>'+s+'</code>',
'pre': lambda s,e: '<pre>'+s+'</pre>'}
return _apply_entities(text, entities, escapes, formatters)
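# Illustrative usage sketch (not part of the original module): formatting a short message
# that has one bold entity. The entity dict mirrors Telegram's MessageEntity fields
# ('offset', 'length', 'type'); the sample text and entity are made up for the example.
if __name__ == '__main__':
    sample_text = 'Hello *world*'
    sample_entities = [{'offset': 0, 'length': 5, 'type': 'bold'}]
    print(apply_entities_as_markdown(sample_text, sample_entities))  # *Hello* \*world\*
    print(apply_entities_as_html(sample_text, sample_entities))      # <b>Hello</b> *world*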
|
"""
getCameraFeature.py
Demonstrates how to get some information about a camera feature.
Note that there are two places to get information about a feature:
1) getCameraFeatures
2) getFeature
getCameraFeatures can be used to query (generally) static information about
a feature. e.g. number of parameters, if it's supported, param max min. etc.
getFeature is used to get the feature's current settings/value.
(setFeature is used to set the feature's current settings/value.)
"""
from pixelinkWrapper import *
def decode_feature_flags(flags):
if(flags & PxLApi.FeatureFlags.PRESENCE):
print("Flag PRESENCE - feature is supported")
if(flags & PxLApi.FeatureFlags.READ_ONLY):
print("Flag READ_ONLY - feature can only be read")
if(flags & PxLApi.FeatureFlags.DESC_SUPPORTED):
print("Flag DESC_SUPPORTED - feature can be saved to different descriptors")
if(flags & PxLApi.FeatureFlags.MANUAL):
print("Flag MANUAL - feature controlled by external app")
if(flags & PxLApi.FeatureFlags.AUTO):
print("Flag AUTO - feature automatically controlled by camera")
if(flags & PxLApi.FeatureFlags.ONEPUSH):
print("Flag ONEPUSH - camera sets feature only once, then returns to manual operation")
if(flags & PxLApi.FeatureFlags.OFF):
print("Flag OFF - feature is set to last known state and cannot be controlled by app")
"""
Print information about an individual camera feature
"""
def print_camera_feature(feature):
# Is the feature supported?
isSupported = feature.uFlags & PxLApi.FeatureFlags.PRESENCE
if(not(isSupported)):
print("Feature {0} is not supported".format(feature.uFeatureId))
else:
print("Number of parameters: {0}".format(feature.uNumberOfParameters))
print("Flags: {0}".format(feature.uFlags))
decode_feature_flags(feature.uFlags)
params = feature.Params
for i in range(feature.uNumberOfParameters):
print("Parameter {0}".format(i))
print("Min value: {0}".format(params[i].fMaxValue))
print("Max value: {0}".format(params[i].fMinValue))
"""
Print information about a feature.
This is one way to determine how many parameters are used by a feature.
The second way is demonstrated in print_feature_trigger.
The advantage of this method is that you can also see the max and min values
the parameters support.
Note that the max and min are exactly that: max and min.
It should not be assumed that all values in between are supported.
For example, an ROI width parameter may have a min/max of 0/1600, but
widths of 7, 13, 59 etc. are not supported.
Note too that a feature's min and max values may change as other
features change.
For example, exposure and frame rate are interlinked, and changing
one may change the min/max for the other.
The feature flags reported by getCameraFeatures indicate which
flags are supported (e.g. FeatureFlags.AUTO). They do not indicate
the current settings; these are available through getFeature.
"""
def print_feature_parameter_info(hCamera, featureId):
assert 0 != hCamera, "No initialized camera"
print("\n----------Feature {0}----------\n".format(featureId))
# Read information about a feature
ret = PxLApi.getCameraFeatures(hCamera, featureId)
if(PxLApi.apiSuccess(ret[0])):
if(None != ret[1]):
cameraFeatures = ret[1]
assert 1 == cameraFeatures.uNumberOfFeatures, "Unexpected number of features"
assert cameraFeatures.Features[0].uFeatureId == featureId, "Unexpected returned featureId"
print_camera_feature(cameraFeatures.Features[0])
"""
In this case, what we'll do is demonstrate the use of FeatureId.ALL to read information
about all features at once.
However, we have to be careful because the order of the features is not
such that we can just index into the array using the feature id value.
Rather, we have to explicitly search the array for the specific feature.
"""
def print_feature_parameter_info2(hCamera, featureId):
assert 0 != hCamera, "No initialized camera"
featureIndex = -1
print("\n----------Feature {0}----------\n".format(featureId))
# Read information about all features
ret = PxLApi.getCameraFeatures(hCamera, PxLApi.FeatureId.ALL)
if(PxLApi.apiSuccess(ret[0])):
cameraFeatures = ret[1]
assert 1 < cameraFeatures.uNumberOfFeatures, "Unexpected number of features"
# Where in the structure of cameraFeatures is the feature we're interested in?
for i in range(cameraFeatures.uNumberOfFeatures):
if(featureId == cameraFeatures.Features[i].uFeatureId):
                featureIndex = i  # store the index into the Features array, not the feature id
break
# Did we find it?
if(-1 == featureIndex):
print("ERROR: Unable to find the information for feature {0}".format(featureId))
return
print_camera_feature(cameraFeatures.Features[featureIndex])
"""
Feature Shutter
FeatureId.SHUTTER is the exposure time.
"""
def print_feature_shutter(hCamera):
assert 0 != hCamera, "No initialized camera"
print("\n------------------------------")
print("Print feature Shutter:\n")
ret = PxLApi.getFeature(hCamera, PxLApi.FeatureId.SHUTTER)
if(PxLApi.apiSuccess(ret[0])):
flags = ret[1]
params = ret[2]
print("Exposure time: {0} seconds\n".format(params[0]))
decode_feature_flags(flags)
"""
Feature White Balance
FeatureId.WHITE_BALANCE is not the RGB white balance, but rather the Color Temperature.
For the RGB white balance, see feature FeatureId.WHITE_SHADING.
Here we assume a colour camera.
If you're running this with a mono camera, getFeature will return an error.
"""
def print_feature_white_balance(hCamera):
assert 0 != hCamera, "No initialized camera"
print("\n------------------------------")
print("Print feature White Balance:\n")
ret = PxLApi.getFeature(hCamera, PxLApi.FeatureId.WHITE_BALANCE)
if(PxLApi.apiSuccess(ret[0])):
flags = ret[1]
params = ret[2]
print("Colour Temperature: {0} degrees Kelvin\n".format(params[0]))
decode_feature_flags(flags)
"""
Feature Trigger
At this point in time FeatureId.TRIGGER has 5 parameters.
"""
def print_feature_trigger(hCamera):
assert 0 != hCamera, "No initialized camera"
print("\n------------------------------")
print("Print feature Trigger:\n")
ret = PxLApi.getFeature(hCamera, PxLApi.FeatureId.TRIGGER)
if(PxLApi.apiSuccess(ret[0])):
flags = ret[1]
params = ret[2]
assert PxLApi.TriggerParams.NUM_PARAMS == len(params), "Returned Trigger params number is different"
print("Mode = {0}".format(params[PxLApi.TriggerParams.MODE]))
print("Type = {0} {1}".format(params[PxLApi.TriggerParams.TYPE],
decode_trigger_type(params[PxLApi.TriggerParams.TYPE])))
print("Polarity = {0} {1}".format(params[PxLApi.TriggerParams.POLARITY],
decode_polarity(params[PxLApi.TriggerParams.POLARITY])))
print("Delay = {0}".format(params[PxLApi.TriggerParams.DELAY]))
print("Parameter = {0}\n".format(params[PxLApi.TriggerParams.PARAMETER]))
decode_feature_flags(flags)
def decode_trigger_type(triggerType):
switcher = {
PxLApi.TriggerTypes.FREE_RUNNING: "trigger type FREE_RUNNING",
PxLApi.TriggerTypes.SOFTWARE: "trigger type SOFTWARE",
PxLApi.TriggerTypes.HARDWARE: "trigger type HARDWARE"
}
return switcher.get(triggerType, "Unknown trigger type")
def decode_polarity(polarity):
switcher = {
0: "negative polarity",
1: "positive polarity"
}
return switcher.get(polarity, "Unknown polarity")
"""
Feature GPIO
At this point in time we assume that GPIO has 6 parameters.
An error will be reported if you're using a microscopy camera
because they don't support GPIO.
"""
def print_feature_gpio(hCamera):
assert 0 != hCamera, "No initialized camera"
print("\n------------------------------")
print("Print feature GPIO:\n")
# Get information about GPO1 by setting params[0] == 1
params = [1]
ret = PxLApi.getFeature(hCamera, PxLApi.FeatureId.GPIO, params)
if(PxLApi.apiSuccess(ret[0])):
flags = ret[1]
params = ret[2]
assert PxLApi.GpioParams.NUM_PARAMS == len(params), "Returned GPIO params number is different"
print("GpioNumber = {0}".format(params[PxLApi.GpioParams.INDEX]))
print("Mode = {0}".format(params[PxLApi.GpioParams.MODE]))
print("Polarity = {0} {1}".format(params[PxLApi.GpioParams.POLARITY],
decode_polarity(params[PxLApi.GpioParams.POLARITY])))
decode_feature_flags(flags)
"""
Feature Saturation
Again we assume that this is a color camera.
getFeature will return an error if the camera is a mono camera.
"""
def print_feature_saturation(hCamera):
assert 0 != hCamera, "No initialized camera"
print("\n------------------------------")
print("Print feature Saturation:\n")
ret = PxLApi.getFeature(hCamera, PxLApi.FeatureId.SATURATION)
if(PxLApi.apiSuccess(ret[0])):
flags = ret[1]
params = ret[2]
assert 1 == len(params), "Returned params number is different"
print("Saturation = {0}".format(params[0]))
decode_feature_flags(flags)
def main():
# We assume there's only one camera connected
ret = PxLApi.initialize(0)
if(PxLApi.apiSuccess(ret[0])):
hCamera = ret[1]
# Print some information about the camera
print_feature_parameter_info(hCamera, PxLApi.FeatureId.SHUTTER)
print_feature_shutter(hCamera)
print_feature_parameter_info(hCamera, PxLApi.FeatureId.WHITE_BALANCE)
print_feature_white_balance(hCamera)
print_feature_parameter_info(hCamera, PxLApi.FeatureId.TRIGGER)
print_feature_trigger(hCamera)
print_feature_parameter_info(hCamera, PxLApi.FeatureId.GPIO)
print_feature_gpio(hCamera)
print_feature_parameter_info(hCamera, PxLApi.FeatureId.SATURATION)
print_feature_saturation(hCamera)
# Demonstrate two ways to get the same information
print_feature_parameter_info(hCamera, PxLApi.FeatureId.ROI)
print_feature_parameter_info2(hCamera, PxLApi.FeatureId.ROI)
# Uninitialize the camera now that we're done with it.
PxLApi.uninitialize(hCamera)
return 0
else:
print("ERROR: {0}\n".format(ret[0]))
return 1
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
#
# Import needed libraries
from mcpi.minecraft import Minecraft
import mcpi.block as block
import time
mc = Minecraft.create() # Connect to Minecraft, running on the local PC
# Create a monolith around the origin
mc.setBlocks(-1, -20, -1, 1, 20, 1, block.GLOWING_OBSIDIAN.id)
# A while loop that continues until the program is stopped.
while True:
pos = mc.player.getTilePos() # Get the player tile position
    # Test if the player's tile position is next to the monolith
if pos.x >= -2 and pos.x <= 2 and pos.y >= -21 and pos.y <= 21 and pos.z >= -2 and pos.z <= 2:
mc.postToChat("WARNING: next to monolith!") # Post a message to the chat
# Sleep for a fifth of a second
time.sleep(0.2)
|
def minSubArrayLen(target, nums):
    """Return the length of the shortest contiguous subarray of nums whose sum is at
    least target, or 0 if no such subarray exists (sliding-window approach)."""
    min_window_size = 2 ** 31 - 1
    current_window_sum = 0
    window_start = 0
    found = False
    for window_end in range(len(nums)):
        current_window_sum += nums[window_end]
        # Shrink the window from the left while it still satisfies the target
        while current_window_sum >= target:
            min_window_size = min(min_window_size, window_end - window_start + 1)
            current_window_sum -= nums[window_start]
            window_start += 1
            found = True
    return min_window_size if found else 0
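# Quick check of the sliding-window solution above (classic "minimum size subarray sum"
# example; the shortest qualifying subarray is [4, 3], so the expected answer is 2).
if __name__ == "__main__":
    print(minSubArrayLen(7, [2, 3, 1, 2, 4, 3]))  # 2
    print(minSubArrayLen(100, [1, 2, 3]))         # 0 (no subarray reaches the target)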
|
import numpy as np
import chainer
from chainer import cuda,Function,gradient_check,report,training,utils,Variable
from chainer import datasets,iterators,optimizers,serializers
from chainer import Link,Chain,ChainList
from chainer import functions as F
from chainer import links as L
from chainer.training import extensions
def VariableTest1():
x=np.array([5],dtype=np.float32)
x=Variable(x)
y=x**2-2*x+1
print(y,y.data)
y.backward()
print(x.grad)
z=2*x
y=x**2-z+1
y.backward()
print("x.grad",x.grad)
print("z.grad",z.grad)
"""
preserve gradient information of intermediate variable
"""
y.backward(retain_grad=True)
print("z.grad",z.grad)
def VariableTest2():
x=Variable(np.array([[1,2,3],[4,5,6]],dtype=np.float32))
y=x**2-2*x+1
"""
Note that if we want to start backward computation from a variable holding a multi-element array,
we must set the initial error manually.
This is done simply by setting the grad attribute of the output variable
i.e. y.gras=np.ones
"""
y.grad=np.ones((2,3),dtype=np.float32)
y.backward()
print(x.grad)
def LinkTest1():
f=L.Linear(3,2)
f.W.data=np.array([[1,2,3],[4,5,6]],dtype=np.float32)
f.b.data=np.array([1,2],dtype=np.float32)
x=Variable(np.array([[1,2,3],[4,5,6]],dtype=np.float32))
print(f.W.data,f.b.data,f.W.data.shape)
y=f(x)
print(y.data)
"""
A=1,2,3
4 5 6
B=1,4
2,3
3,6
matrix product AB=14 ,32
32 ,77
"""
#lets backward
f.cleargrads()
y.grad=np.ones((2,2),dtype=np.float32)
y.backward()
print(f.W.grad)
print(f.b.grad)
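    # Expected gradients for the backward pass above, worked by hand for reference:
    # with upstream gradient g = ones((2, 2)) and y = x.dot(W.T) + b,
    #   f.W.grad = g.T.dot(x)    = [[5, 7, 9], [5, 7, 9]]
    #   f.b.grad = g.sum(axis=0) = [2, 2]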
def main():
LinkTest1()
if __name__ == '__main__':
main()
|
# Copyright (C) 2020 University of Oxford
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Covid-19 Infected Situation Reports
# https://covid19.th-stat.com/en/api
#
import logging
import requests
import pandas as pd
from datetime import datetime
__all__ = ('ThailandSTATFetcher',)
from utils.fetcher.base_epidemiology import BaseEpidemiologyFetcher
logger = logging.getLogger(__name__)
class ThailandSTATFetcher(BaseEpidemiologyFetcher):
LOAD_PLUGIN = True
SOURCE = 'THA_STAT'
def fetch(self, category):
return requests.get(f'https://covid19.th-stat.com/api/open/{category}').json()
def run(self):
logger.debug('Fetching country-level information')
data = self.fetch('timeline')
for record in data['Data']:
upsert_obj = {
'source': self.SOURCE,
'date': datetime.strptime(record['Date'], '%m/%d/%Y').strftime('%Y-%m-%d'),
'country': 'Thailand',
'countrycode': 'THA',
'gid': ['THA'],
'confirmed': int(record['Confirmed']),
'dead': int(record['Deaths']),
'recovered': int(record['Recovered']),
'hospitalised': int(record['Hospitalized'])
}
self.upsert_data(**upsert_obj)
logger.debug('Fetching regional information')
data = self.fetch('cases')
# Get cumulative counts from the cross table of dates and provinces
df = pd.DataFrame(data['Data'], columns=['ConfirmDate', 'ProvinceEn'])
crosstabsum = pd.crosstab(df.ConfirmDate.apply(lambda d: d[:10]), df.ProvinceEn) \
.sort_index() \
.cumsum()
for confirmdate, row in crosstabsum.iterrows():
for provinceen, confirmed in row.items():
success, adm_area_1, adm_area_2, adm_area_3, gid = self.adm_translator.tr(
input_adm_area_1=provinceen,
input_adm_area_2=None,
input_adm_area_3=None,
return_original_if_failure=True
)
upsert_obj = {
'source': self.SOURCE,
'date': confirmdate,
'country': 'Thailand',
'countrycode': 'THA',
'adm_area_1': adm_area_1,
'adm_area_2': adm_area_2,
'adm_area_3': adm_area_3,
'gid': gid,
'confirmed': int(confirmed)
}
self.upsert_data(**upsert_obj)
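# Small self-contained illustration (not part of the fetcher) of the crosstab/cumsum
# pattern used in run() above: per-case rows become cumulative per-province counts.
if __name__ == '__main__':
    toy = pd.DataFrame({'ConfirmDate': ['2020-03-01', '2020-03-01', '2020-03-02'],
                        'ProvinceEn': ['Bangkok', 'Phuket', 'Bangkok']})
    print(pd.crosstab(toy.ConfirmDate, toy.ProvinceEn).sort_index().cumsum())
    # ProvinceEn   Bangkok  Phuket
    # ConfirmDate
    # 2020-03-01         1       1
    # 2020-03-02         2       1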
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A factory-pattern class which returns a dataset. Modified from the slim image classification model library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from datasets import anime_faces
from datasets import celeba_facenet
from datasets import image_only
from datasets import image_pair
from datasets import danbooru_2_illust2vec
from datasets import svhn
from datasets import celeba
# The flags here are shared among datasets. Each dataset will define its own default values.
tf.flags.DEFINE_integer('num_classes', 0, 'number of classes in this dataset.')
tf.flags.DEFINE_integer('embedding_size', 0, 'embedding size of this dataset.')
# Data size is used for decreasing learning rate and for evaluating once through the dataset.
tf.flags.DEFINE_integer('train_size', 0, 'number of training samples in this dataset.')
tf.flags.DEFINE_integer('validation_size', 0, 'number of validation samples in this dataset.')
tf.flags.DEFINE_boolean('dataset_use_target', False,
'If set, outputs images to "target". Otherwise outputs to "source". '
'Check each dataset to see if this flag is used.')
tf.flags.DEFINE_string('tags_id_lookup_file', './datasets/illust2vec_tag_list.txt',
'Optional path to the tags to be processed by tensorflow.contrib.lookup.index_table_from_file'
'e.g. for illust2vec (./datasets/illust2vec_tag_list.txt) the line format is: '
'original_illust2vec_id, tag, group. ')
tf.flags.DEFINE_integer('tags_key_column_index', None, 'See tensorflow.contrib.lookup.index_table_from_file.')
tf.flags.DEFINE_integer('tags_value_column_index', None, 'See tensorflow.contrib.lookup.index_table_from_file.')
FLAGS = tf.flags.FLAGS
datasets_map = {
'anime_faces': anime_faces,
'celeba': celeba,
'celeba_facenet': celeba_facenet,
'danbooru_2_illust2vec': danbooru_2_illust2vec,
'image_only': image_only,
'image_pair': image_pair,
'svhn': svhn,
}
def get_dataset(name, split_name, dataset_dir, file_pattern=None, reader=None):
"""Given a dataset name and a split_name returns a Dataset.
Args:
name: String, the name of the dataset.
split_name: A train/test split name.
dataset_dir: The directory where the dataset files are stored.
file_pattern: The file pattern to use for matching the dataset source files.
reader: The subclass of tf.ReaderBase. If left as `None`, then the default
reader defined by each dataset is used.
Returns:
A `Dataset` class.
Raises:
ValueError: If the dataset `name` is unknown.
"""
if name not in datasets_map:
        raise ValueError('Unknown dataset name: %s' % name)
dataset = datasets_map[name].get_split(
split_name,
dataset_dir,
file_pattern,
reader)
dataset.name = name
if FLAGS.train_size and split_name == 'train':
dataset.num_samples = FLAGS.train_size
else:
if FLAGS.validation_size:
dataset.num_samples = FLAGS.validation_size
return dataset
|
from django.urls import path
from rest_framework.authtoken.views import obtain_auth_token
from api import views
app_name = 'api'
urlpatterns = [
path('feed/detail/', views.FeedDetailView.as_view(), name='feed-detail'),
path('feed/detail/no-auth/', views.FeedDetailAllowAnyView.as_view(), name='feed-detail-no-auth'),
path('feed/detail/save/', views.FeedDetailSaveView.as_view(), name='feed-detail-save'),
path('token/', obtain_auth_token, name='obtain-auth-token')
]
|
from datetime import datetime
import numpy as np
inputs = open('../input.txt', 'r')
data = inputs.readlines()
guards = dict()
class SleepSession:
def __init__(self, falls_asleep_time):
self.asleep_time = falls_asleep_time
self.awake_time = None
def wake(self, wakes_up_time):
self.awake_time = wakes_up_time
@property
def sleep_minutes(self):
session_minutes = (self.awake_time - self.asleep_time).seconds // 60
return session_minutes
@property
def sleep_hour_array(self):
        hour = np.full(60, False, dtype=bool)
        start_index = self.asleep_time.minute if self.asleep_time else None
        end_index = self.awake_time.minute if self.awake_time else None
        # Compare against None explicitly: minute 0 is a valid (but falsy) index
        if start_index is not None and end_index is not None:
            hour[start_index:end_index] = True
        return hour
class GuardShift:
def __init__(self, start_datetime):
self.shift_start = start_datetime
self.sleep_sessions = []
self.current_session = None
def sleep(self, start_datetime):
self.current_session = SleepSession(start_datetime)
self.sleep_sessions.append(self.current_session)
def wake(self, end_datetime):
self.current_session.wake(end_datetime)
self.current_session = None
@staticmethod
def start_shift(start_datetime):
return GuardShift(start_datetime)
@property
def total_sleep_minutes(self):
shift_minutes = sum([s.sleep_minutes for s in self.sleep_sessions])
return shift_minutes
@property
def sleep_matrix(self):
return np.array([session.sleep_hour_array for session in self.sleep_sessions])
class Guard:
def __init__(self, guard_id):
self.id = guard_id
self.shifts = []
def record_shift(self, shift):
self.shifts.append(shift)
@property
def total_sleep_minutes(self):
return sum([s.total_sleep_minutes for s in self.shifts])
@property
def full_sleep_matrix(self):
return np.concatenate([shift.sleep_matrix for shift in self.shifts])
@property
def total_days_slept_by_minute(self):
return np.sum(self.full_sleep_matrix, axis=0)
@property
def sleepiest_minute(self):
return np.argmax(self.total_days_slept_by_minute)
class ShiftProcessor:
guards = dict()
current_guard = None
current_shift = None
@staticmethod
def parse_line(log_line):
log_line = log_line.rstrip().lstrip()
open_date_bracket = log_line.find('[')
close_date_bracket = log_line.find(']')
date_timestamp = datetime.fromisoformat(log_line[open_date_bracket+1:close_date_bracket])
rest_of_line = log_line[close_date_bracket+1:].lstrip()
line_type = 'shift'
guard_number = None
if rest_of_line == 'falls asleep':
line_type = 'sleep'
elif rest_of_line == 'wakes up':
line_type = 'wake'
if line_type == 'shift':
guard_number = int(rest_of_line.split(' ')[1].lstrip('#'))
return line_type, date_timestamp, guard_number
def next_line(self, line):
try:
(log_type, timestamp, guard_number) = line
if log_type == 'shift' and guard_number:
self.current_shift = None
if guard_number in self.guards:
self.current_guard = self.guards[guard_number]
else:
self.current_guard = Guard(guard_number)
self.guards[guard_number] = self.current_guard
self.current_shift = GuardShift.start_shift(timestamp)
self.current_guard.record_shift(self.current_shift)
elif log_type == 'sleep' and self.current_shift:
self.current_shift.sleep(timestamp)
elif log_type == 'wake' and self.current_shift:
self.current_shift.wake(timestamp)
except Exception:
print(line)
quit()
lines = map(ShiftProcessor.parse_line, data)
log = sorted(lines, key=lambda x: x[1])
processor = ShiftProcessor()
for line in log:
processor.next_line(line)
guards_list = list(processor.guards.items())
guards_list = sorted(guards_list, key=lambda g: g[1].total_sleep_minutes, reverse=True)
laziest_guard = guards_list[0][1]
print('Guard {}: {} minutes'.format(laziest_guard.id, laziest_guard.total_sleep_minutes))
print('Sleepiest minute: {}'.format(laziest_guard.sleepiest_minute))
print('Answer result: {}'.format(laziest_guard.id * laziest_guard.sleepiest_minute))
|
from __future__ import annotations
from ._base import TelegramObject
class CallbackGame(TelegramObject):
"""
A placeholder, currently holds no information. Use BotFather to set up your game.
Source: https://core.telegram.org/bots/api#callbackgame
"""
|
# Generated by Django 3.1.4 on 2021-01-15 14:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('dubitoapp', '0019_auto_20210115_0037'),
]
operations = [
migrations.AddField(
model_name='game',
name='is_public',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='player',
name='game_id',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='dubitoapp.game'),
),
]
|
# coding=utf-8
# Copyright 2020 The Trax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for trax.rl.awr_trainer."""
import contextlib
import functools
import tempfile
from absl.testing import absltest
from tensor2tensor.envs import gym_env_problem
from tensor2tensor.rl import gym_utils
from tensorflow.compat.v1 import test
from tensorflow.compat.v1.io import gfile
from trax import layers
from trax import optimizers
from trax.rl import awr_trainer
class AwrTrainerTest(absltest.TestCase):
def get_wrapped_env(self,
name='CartPole-v0',
max_episode_steps=10,
batch_size=2):
wrapper_fn = functools.partial(
gym_utils.gym_env_wrapper,
**{
'rl_env_max_episode_steps': max_episode_steps,
'maxskip_env': False,
'rendered_env': False,
'rendered_env_resize_to': None, # Do not resize frames
'sticky_actions': False,
'output_dtype': None,
'num_actions': None,
})
return gym_env_problem.GymEnvProblem(
base_env_name=name,
batch_size=batch_size,
env_wrapper_fn=wrapper_fn,
discrete_rewards=False)
@contextlib.contextmanager
def tmp_dir(self):
tmp = tempfile.mkdtemp(dir=test.get_temp_dir())
yield tmp
gfile.rmtree(tmp)
def _make_trainer(self,
train_env,
eval_env,
output_dir,
num_samples_to_collect=20,
replay_buffer_sample_size=50,
model=None,
optimizer=None,
max_timestep=None,
**kwargs):
if model is None:
# pylint: disable=g-long-lambda
model = lambda: layers.Serial(
layers.Dense(32),
layers.Relu(),
)
# pylint: enable=g-long-lambda
if optimizer is None:
optimizer = functools.partial(optimizers.SGD, 5e-5)
return awr_trainer.AwrTrainer(
train_env=train_env,
eval_env=eval_env,
policy_and_value_model=model,
policy_and_value_optimizer=optimizer,
num_samples_to_collect=num_samples_to_collect,
replay_buffer_sample_size=replay_buffer_sample_size,
actor_optimization_steps=2,
critic_optimization_steps=2,
output_dir=output_dir,
random_seed=0,
max_timestep=max_timestep,
boundary=20,
actor_loss_weight=1.0,
entropy_bonus=0.01,
**kwargs)
def test_training_loop_cartpole(self):
with self.tmp_dir() as output_dir:
trainer = self._make_trainer(
train_env=self.get_wrapped_env('CartPole-v0', 10),
eval_env=self.get_wrapped_env('CartPole-v0', 10),
output_dir=output_dir,
num_samples_to_collect=20,
max_timestep=20,
replay_buffer_sample_size=50,
)
trainer.training_loop(n_epochs=2)
if __name__ == '__main__':
absltest.main()
|
class Solution:
    def get_sub_XXX(self, root):
        # Recursively compute the maximum depth of the (sub)tree rooted at `root`
        if root is None:
            return 0
        if root.left is None and root.right is None:
            return 1
        return 1 + max(self.get_sub_XXX(root.left), self.get_sub_XXX(root.right))
    def XXX(self, root: 'TreeNode') -> 'int':
        return self.get_sub_XXX(root)
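# Illustrative check (TreeNode is not defined in this snippet, so a minimal stand-in is
# declared here purely for the example): the depth of this tree is 3.
if __name__ == "__main__":
    class TreeNode:
        def __init__(self, val, left=None, right=None):
            self.val, self.left, self.right = val, left, right
    root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
    print(Solution().XXX(root))  # 3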
|
# Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import annotations
from typing import Callable, Dict, Iterable, List, Optional, Tuple, Type, TypeVar, Union
import numpy as np
from braket.circuits import compiler_directives
from braket.circuits.ascii_circuit_diagram import AsciiCircuitDiagram
from braket.circuits.gate import Gate
from braket.circuits.instruction import Instruction
from braket.circuits.moments import Moments
from braket.circuits.noise import Noise
from braket.circuits.noise_helpers import (
apply_noise_to_gates,
apply_noise_to_moments,
check_noise_target_gates,
check_noise_target_qubits,
check_noise_target_unitary,
wrap_with_list,
)
from braket.circuits.observable import Observable
from braket.circuits.observables import TensorProduct
from braket.circuits.qubit import QubitInput
from braket.circuits.qubit_set import QubitSet, QubitSetInput
from braket.circuits.result_type import ObservableResultType, ResultType
from braket.circuits.unitary_calculation import calculate_unitary
from braket.ir.jaqcd import Program
SubroutineReturn = TypeVar(
"SubroutineReturn", Iterable[Instruction], Instruction, ResultType, Iterable[ResultType]
)
SubroutineCallable = TypeVar("SubroutineCallable", bound=Callable[..., SubroutineReturn])
AddableTypes = TypeVar("AddableTypes", SubroutineReturn, SubroutineCallable)
class Circuit:
"""
A representation of a quantum circuit that contains the instructions to be performed on a
quantum device and the requested result types.
See :mod:`braket.circuits.gates` module for all of the supported instructions.
See :mod:`braket.circuits.result_types` module for all of the supported result types.
`AddableTypes` are `Instruction`, iterable of `Instruction`, `ResultType`,
iterable of `ResultType`, or `SubroutineCallable`
"""
_ALL_QUBITS = "ALL" # Flag to indicate all qubits in _qubit_observable_mapping
@classmethod
def register_subroutine(cls, func: SubroutineCallable) -> None:
"""
Register the subroutine `func` as an attribute of the `Circuit` class. The attribute name
is the name of `func`.
Args:
func (Callable[..., Union[Instruction, Iterable[Instruction], ResultType,
Iterable[ResultType]]): The function of the subroutine to add to the class.
Examples:
>>> def h_on_all(target):
... circ = Circuit()
... for qubit in target:
... circ += Instruction(Gate.H(), qubit)
... return circ
...
>>> Circuit.register_subroutine(h_on_all)
>>> circ = Circuit().h_on_all(range(2))
>>> for instr in circ.instructions:
... print(instr)
...
Instruction('operator': 'H', 'target': QubitSet(Qubit(0),))
Instruction('operator': 'H', 'target': QubitSet(Qubit(1),))
"""
def method_from_subroutine(self, *args, **kwargs) -> SubroutineReturn:
return self.add(func, *args, **kwargs)
function_name = func.__name__
setattr(cls, function_name, method_from_subroutine)
function_attr = getattr(cls, function_name)
setattr(function_attr, "__doc__", func.__doc__)
def __init__(self, addable: AddableTypes = None, *args, **kwargs):
"""
Args:
addable (AddableTypes): The item(s) to add to self.
Default = None.
*args: Variable length argument list. Supports any arguments that `add()` offers.
**kwargs: Arbitrary keyword arguments. Supports any keyword arguments that `add()`
offers.
Raises:
TypeError: If `addable` is an unsupported type.
Examples:
>>> circ = Circuit([Instruction(Gate.H(), 4), Instruction(Gate.CNot(), [4, 5])])
>>> circ = Circuit().h(0).cnot(0, 1)
>>> circ = Circuit().h(0).cnot(0, 1).probability([0, 1])
>>> @circuit.subroutine(register=True)
>>> def bell_pair(target):
... return Circ().h(target[0]).cnot(target[0:2])
...
>>> circ = Circuit(bell_pair, [4,5])
>>> circ = Circuit().bell_pair([4,5])
"""
self._moments: Moments = Moments()
self._result_types: Dict[ResultType] = {}
self._qubit_observable_mapping: Dict[Union[int, Circuit._ALL_QUBITS], Observable] = {}
self._qubit_observable_target_mapping: Dict[int, Tuple[int]] = {}
self._qubit_observable_set = set()
self._observables_simultaneously_measurable = True
self._has_compiler_directives = False
if addable is not None:
self.add(addable, *args, **kwargs)
@property
def depth(self) -> int:
"""int: Get the circuit depth."""
return self._moments.depth
@property
def instructions(self) -> Iterable[Instruction]:
"""Iterable[Instruction]: Get an `iterable` of instructions in the circuit."""
return self._moments.values()
@property
def result_types(self) -> List[ResultType]:
"""List[ResultType]: Get a list of requested result types in the circuit."""
return list(self._result_types.keys())
@property
def basis_rotation_instructions(self) -> List[Instruction]:
"""List[Instruction]: Get a list of basis rotation instructions in the circuit.
These basis rotation instructions are added if result types are requested for
an observable other than Pauli-Z.
This only makes sense if all observables are simultaneously measurable;
if not, this method will return an empty list.
"""
# Note that basis_rotation_instructions can change each time a new instruction
# is added to the circuit because `self._moments.qubits` would change
basis_rotation_instructions = []
all_qubit_observable = self._qubit_observable_mapping.get(Circuit._ALL_QUBITS)
if all_qubit_observable:
for target in self.qubits:
basis_rotation_instructions += Circuit._observable_to_instruction(
all_qubit_observable, target
)
return basis_rotation_instructions
target_lists = sorted(set(self._qubit_observable_target_mapping.values()))
for target_list in target_lists:
observable = self._qubit_observable_mapping[target_list[0]]
basis_rotation_instructions += Circuit._observable_to_instruction(
observable, target_list
)
return basis_rotation_instructions
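    # For instance (assuming the standard result-type subroutines are registered),
    # Circuit().h(0).expectation(Observable.X(), 0).basis_rotation_instructions is
    # expected to contain a single Hadamard on qubit 0, since X is diagonalised by H.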
@staticmethod
def _observable_to_instruction(observable: Observable, target_list: List[int]):
return [Instruction(gate, target_list) for gate in observable.basis_rotation_gates]
@property
def moments(self) -> Moments:
"""Moments: Get the `moments` for this circuit. Note that this includes observables."""
return self._moments
@property
def qubit_count(self) -> int:
"""Get the qubit count for this circuit. Note that this includes observables."""
all_qubits = self._moments.qubits.union(self._qubit_observable_set)
return len(all_qubits)
@property
def qubits(self) -> QubitSet:
"""QubitSet: Get a copy of the qubits for this circuit."""
return QubitSet(self._moments.qubits.union(self._qubit_observable_set))
def add_result_type(
self,
result_type: ResultType,
target: QubitSetInput = None,
target_mapping: Dict[QubitInput, QubitInput] = None,
) -> Circuit:
"""
Add a requested result type to `self`, returns `self` for chaining ability.
Args:
result_type (ResultType): `ResultType` to add into `self`.
target (int, Qubit, or iterable of int / Qubit, optional): Target qubits for the
`result_type`.
Default = `None`.
target_mapping (dictionary[int or Qubit, int or Qubit], optional): A dictionary of
qubit mappings to apply to the `result_type.target`. Key is the qubit in
`result_type.target` and the value is what the key will be changed to.
Default = `None`.
Note: target and target_mapping will only be applied to those requested result types with
the attribute `target`. The result_type will be appended to the end of the dict keys of
`circuit.result_types` only if it does not already exist in `circuit.result_types`
Returns:
Circuit: self
Raises:
TypeError: If both `target_mapping` and `target` are supplied.
Examples:
>>> result_type = ResultType.Probability(target=[0, 1])
>>> circ = Circuit().add_result_type(result_type)
>>> print(circ.result_types[0])
Probability(target=QubitSet([Qubit(0), Qubit(1)]))
>>> result_type = ResultType.Probability(target=[0, 1])
>>> circ = Circuit().add_result_type(result_type, target_mapping={0: 10, 1: 11})
>>> print(circ.result_types[0])
Probability(target=QubitSet([Qubit(10), Qubit(11)]))
>>> result_type = ResultType.Probability(target=[0, 1])
>>> circ = Circuit().add_result_type(result_type, target=[10, 11])
>>> print(circ.result_types[0])
Probability(target=QubitSet([Qubit(10), Qubit(11)]))
>>> result_type = ResultType.StateVector()
>>> circ = Circuit().add_result_type(result_type)
>>> print(circ.result_types[0])
StateVector()
"""
if target_mapping and target is not None:
raise TypeError("Only one of 'target_mapping' or 'target' can be supplied.")
if not target_mapping and not target:
# Nothing has been supplied, add result_type
result_type_to_add = result_type
elif target_mapping:
# Target mapping has been supplied, copy result_type
result_type_to_add = result_type.copy(target_mapping=target_mapping)
else:
# ResultType with target
result_type_to_add = result_type.copy(target=target)
if result_type_to_add not in self._result_types:
observable = Circuit._extract_observable(result_type_to_add)
if observable and self._observables_simultaneously_measurable:
# Only check if all observables can be simultaneously measured
self._add_to_qubit_observable_mapping(observable, result_type_to_add.target)
self._add_to_qubit_observable_set(result_type_to_add)
# using dict as an ordered set, value is arbitrary
self._result_types[result_type_to_add] = None
return self
@staticmethod
def _extract_observable(result_type: ResultType) -> Optional[Observable]:
if isinstance(result_type, ResultType.Probability):
return Observable.Z() # computational basis
elif isinstance(result_type, ObservableResultType):
return result_type.observable
else:
return None
def _add_to_qubit_observable_mapping(
self, observable: Observable, observable_target: QubitSet
) -> None:
targets = observable_target or list(self._qubit_observable_set)
all_qubits_observable = self._qubit_observable_mapping.get(Circuit._ALL_QUBITS)
tensor_product_dict = (
Circuit._tensor_product_index_dict(observable, observable_target)
if isinstance(observable, TensorProduct)
else None
)
identity = Observable.I()
for i in range(len(targets)):
target = targets[i]
new_observable = tensor_product_dict[i][0] if tensor_product_dict else observable
current_observable = all_qubits_observable or self._qubit_observable_mapping.get(target)
add_observable = not current_observable or (
current_observable == identity and new_observable != identity
)
if (
not add_observable
and current_observable != identity
and new_observable != identity
and current_observable != new_observable
):
return self._encounter_noncommuting_observable()
if observable_target:
new_targets = (
tensor_product_dict[i][1] if tensor_product_dict else tuple(observable_target)
)
if add_observable:
self._qubit_observable_target_mapping[target] = new_targets
self._qubit_observable_mapping[target] = new_observable
elif new_observable.qubit_count > 1:
current_target = self._qubit_observable_target_mapping.get(target)
if current_target and current_target != new_targets:
return self._encounter_noncommuting_observable()
if not observable_target:
if all_qubits_observable and all_qubits_observable != observable:
return self._encounter_noncommuting_observable()
self._qubit_observable_mapping[Circuit._ALL_QUBITS] = observable
@staticmethod
def _tensor_product_index_dict(
observable: TensorProduct, observable_target: QubitSet
) -> Dict[int, Tuple[Observable, Tuple[int, ...]]]:
obj_dict = {}
i = 0
factors = list(observable.factors)
total = factors[0].qubit_count
while factors:
if i >= total:
factors.pop(0)
if factors:
total += factors[0].qubit_count
if factors:
first = total - factors[0].qubit_count
obj_dict[i] = (factors[0], tuple(observable_target[first:total]))
i += 1
return obj_dict
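    # Worked example for reference: with observable = TensorProduct of single-qubit X and Z
    # and observable_target = (5, 6), this returns {0: (X, (5,)), 1: (Z, (6,))}, i.e. each
    # position in the target maps to the factor acting there plus that factor's full target.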
def _add_to_qubit_observable_set(self, result_type: ResultType) -> None:
if isinstance(result_type, ObservableResultType) and result_type.target:
self._qubit_observable_set.update(result_type.target)
def add_instruction(
self,
instruction: Instruction,
target: QubitSetInput = None,
target_mapping: Dict[QubitInput, QubitInput] = None,
) -> Circuit:
"""
Add an instruction to `self`, returns `self` for chaining ability.
Args:
instruction (Instruction): `Instruction` to add into `self`.
target (int, Qubit, or iterable of int / Qubit, optional): Target qubits for the
`instruction`. If a single qubit gate, an instruction is created for every index
in `target`.
Default = `None`.
target_mapping (dictionary[int or Qubit, int or Qubit], optional): A dictionary of
qubit mappings to apply to the `instruction.target`. Key is the qubit in
`instruction.target` and the value is what the key will be changed to.
Default = `None`.
Returns:
Circuit: self
Raises:
TypeError: If both `target_mapping` and `target` are supplied.
Examples:
>>> instr = Instruction(Gate.CNot(), [0, 1])
>>> circ = Circuit().add_instruction(instr)
>>> print(circ.instructions[0])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(0), Qubit(1)))
>>> instr = Instruction(Gate.CNot(), [0, 1])
>>> circ = Circuit().add_instruction(instr, target_mapping={0: 10, 1: 11})
>>> print(circ.instructions[0])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(10), Qubit(11)))
>>> instr = Instruction(Gate.CNot(), [0, 1])
>>> circ = Circuit().add_instruction(instr, target=[10, 11])
>>> print(circ.instructions[0])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(10), Qubit(11)))
>>> instr = Instruction(Gate.H(), 0)
>>> circ = Circuit().add_instruction(instr, target=[10, 11])
>>> print(circ.instructions[0])
Instruction('operator': 'H', 'target': QubitSet(Qubit(10),))
>>> print(circ.instructions[1])
Instruction('operator': 'H', 'target': QubitSet(Qubit(11),))
"""
if target_mapping and target is not None:
raise TypeError("Only one of 'target_mapping' or 'target' can be supplied.")
if not target_mapping and not target:
# Nothing has been supplied, add instruction
instructions_to_add = [instruction]
elif target_mapping:
# Target mapping has been supplied, copy instruction
instructions_to_add = [instruction.copy(target_mapping=target_mapping)]
elif hasattr(instruction.operator, "qubit_count") and instruction.operator.qubit_count == 1:
# single qubit operator with target, add an instruction for each target
instructions_to_add = [instruction.copy(target=qubit) for qubit in target]
else:
# non single qubit operator with target, add instruction with target
instructions_to_add = [instruction.copy(target=target)]
self._moments.add(instructions_to_add)
return self
def add_circuit(
self,
circuit: Circuit,
target: QubitSetInput = None,
target_mapping: Dict[QubitInput, QubitInput] = None,
) -> Circuit:
"""
Add a `circuit` to self, returns self for chaining ability.
Args:
circuit (Circuit): Circuit to add into self.
target (int, Qubit, or iterable of int / Qubit, optional): Target qubits for the
supplied circuit. This is a macro over `target_mapping`; `target` is converted to
a `target_mapping` by zipping together a sorted `circuit.qubits` and `target`.
Default = `None`.
target_mapping (dictionary[int or Qubit, int or Qubit], optional): A dictionary of
qubit mappings to apply to the qubits of `circuit.instructions`. Key is the qubit
to map, and the value is what to change it to. Default = `None`.
Returns:
Circuit: self
Raises:
TypeError: If both `target_mapping` and `target` are supplied.
Note:
Supplying `target` sorts `circuit.qubits` to have deterministic behavior since
`circuit.qubits` ordering is based on how instructions are inserted.
            Use caution when using this with circuits with a lot of qubits, as the sort
can be resource-intensive. Use `target_mapping` to use a linear runtime to remap
the qubits.
Requested result types of the circuit that will be added will be appended to the end
of the list for the existing requested result types. A result type to be added that is
equivalent to an existing requested result type will not be added.
Examples:
>>> widget = Circuit().h(0).cnot(0, 1)
>>> circ = Circuit().add_circuit(widget)
>>> instructions = list(circ.instructions)
>>> print(instructions[0])
Instruction('operator': 'H', 'target': QubitSet(Qubit(0),))
>>> print(instructions[1])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(0), Qubit(1)))
>>> widget = Circuit().h(0).cnot(0, 1)
>>> circ = Circuit().add_circuit(widget, target_mapping={0: 10, 1: 11})
>>> instructions = list(circ.instructions)
>>> print(instructions[0])
Instruction('operator': 'H', 'target': QubitSet(Qubit(10),))
>>> print(instructions[1])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(10), Qubit(11)))
>>> widget = Circuit().h(0).cnot(0, 1)
>>> circ = Circuit().add_circuit(widget, target=[10, 11])
>>> instructions = list(circ.instructions)
>>> print(instructions[0])
Instruction('operator': 'H', 'target': QubitSet(Qubit(10),))
>>> print(instructions[1])
Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(10), Qubit(11)))
"""
if target_mapping and target is not None:
raise TypeError("Only one of 'target_mapping' or 'target' can be supplied.")
elif target is not None:
keys = sorted(circuit.qubits)
values = target
target_mapping = dict(zip(keys, values))
for instruction in circuit.instructions:
self.add_instruction(instruction, target_mapping=target_mapping)
for result_type in circuit.result_types:
self.add_result_type(result_type, target_mapping=target_mapping)
return self
def add_verbatim_box(
self,
verbatim_circuit: Circuit,
target: QubitSetInput = None,
target_mapping: Dict[QubitInput, QubitInput] = None,
) -> Circuit:
"""
Add a verbatim `circuit` to self, that is, ensures that `circuit` is not modified in any way
by the compiler.
Args:
verbatim_circuit (Circuit): Circuit to add into self.
target (int, Qubit, or iterable of int / Qubit, optional): Target qubits for the
supplied circuit. This is a macro over `target_mapping`; `target` is converted to
a `target_mapping` by zipping together a sorted `circuit.qubits` and `target`.
Default = `None`.
target_mapping (dictionary[int or Qubit, int or Qubit], optional): A dictionary of
qubit mappings to apply to the qubits of `circuit.instructions`. Key is the qubit
to map, and the value is what to change it to. Default = `None`.
Returns:
Circuit: self
Raises:
TypeError: If both `target_mapping` and `target` are supplied.
ValueError: If `circuit` has result types attached
Examples:
>>> widget = Circuit().h(0).h(1)
>>> circ = Circuit().add_verbatim_box(widget)
>>> print(list(circ.instructions))
[Instruction('operator': StartVerbatimBox, 'target': QubitSet([])),
Instruction('operator': H('qubit_count': 1), 'target': QubitSet([Qubit(0)])),
Instruction('operator': H('qubit_count': 1), 'target': QubitSet([Qubit(1)])),
Instruction('operator': EndVerbatimBox, 'target': QubitSet([]))]
            >>> widget = Circuit().h(0).h(1)
>>> circ = Circuit().add_verbatim_box(widget, target_mapping={0: 10, 1: 11})
>>> print(list(circ.instructions))
[Instruction('operator': StartVerbatimBox, 'target': QubitSet([])),
Instruction('operator': H('qubit_count': 1), 'target': QubitSet([Qubit(10)])),
Instruction('operator': H('qubit_count': 1), 'target': QubitSet([Qubit(11)])),
Instruction('operator': EndVerbatimBox, 'target': QubitSet([]))]
            >>> widget = Circuit().h(0).h(1)
>>> circ = Circuit().add_verbatim_box(widget, target=[10, 11])
>>> print(list(circ.instructions))
[Instruction('operator': StartVerbatimBox, 'target': QubitSet([])),
Instruction('operator': H('qubit_count': 1), 'target': QubitSet([Qubit(10)])),
Instruction('operator': H('qubit_count': 1), 'target': QubitSet([Qubit(11)])),
Instruction('operator': EndVerbatimBox, 'target': QubitSet([]))]
"""
if target_mapping and target is not None:
raise TypeError("Only one of 'target_mapping' or 'target' can be supplied.")
elif target is not None:
keys = sorted(verbatim_circuit.qubits)
values = target
target_mapping = dict(zip(keys, values))
if verbatim_circuit.result_types:
raise ValueError("Verbatim subcircuit is not measured and cannot have result types")
if verbatim_circuit.instructions:
self.add_instruction(Instruction(compiler_directives.StartVerbatimBox()))
for instruction in verbatim_circuit.instructions:
self.add_instruction(instruction, target_mapping=target_mapping)
self.add_instruction(Instruction(compiler_directives.EndVerbatimBox()))
self._has_compiler_directives = True
return self
def apply_gate_noise(
self,
noise: Union[Type[Noise], Iterable[Type[Noise]]],
target_gates: Optional[Union[Type[Gate], Iterable[Type[Gate]]]] = None,
target_unitary: np.ndarray = None,
target_qubits: Optional[QubitSetInput] = None,
) -> Circuit:
"""Apply `noise` to the circuit according to `target_gates`, `target_unitary` and
`target_qubits`.
For any parameter that is None, that specification is ignored (e.g. if `target_gates`
is None then the noise is applied after every gate in `target_qubits`).
If `target_gates` and `target_qubits` are both None, then `noise` is
applied to every qubit after every gate.
Noise is either applied to `target_gates` or `target_unitary`, so they cannot be
provided at the same time.
        When `noise.qubit_count` == 1, i.e. `noise` is single-qubit, `noise` is added to all
qubits in `target_gates` or `target_unitary` (or to all qubits in `target_qubits`
if `target_gates` is None).
When `noise.qubit_count` > 1 and `target_gates` is not None, the number of qubits of
any gate in `target_gates` must be the same as `noise.qubit_count`.
        When `noise.qubit_count` > 1 and both `target_gates` and `target_unitary` are None,
        noise is only applied to gates with the same qubit_count in `target_qubits`.
Args:
noise (Union[Type[Noise], Iterable[Type[Noise]]]): Noise channel(s) to be applied
to the circuit.
            target_gates (Union[Type[Gate], Iterable[Type[Gate]]], optional): Gate class or
                list of Gate classes to which `noise` is applied. Default=None.
            target_unitary (np.ndarray, optional): Matrix of the target unitary gate. Default=None.
            target_qubits (QubitSetInput, optional): Index or indices of qubit(s).
                Default=None.
Returns:
Circuit: self
Raises:
TypeError:
If `noise` is not Noise type.
If `target_gates` is not a Gate type, Iterable[Gate].
If `target_unitary` is not a np.ndarray type.
If `target_qubits` has non-integers or negative integers.
IndexError:
If applying noise to an empty circuit.
If `target_qubits` is out of range of circuit.qubits.
ValueError:
If both `target_gates` and `target_unitary` are provided.
If `target_unitary` is not a unitary.
If `noise` is multi-qubit noise and `target_gates` contain gates
with the number of qubits not the same as `noise.qubit_count`.
Warning:
If `noise` is multi-qubit noise while there is no gate with the same
number of qubits in `target_qubits` or in the whole circuit when
`target_qubits` is not given.
If no `target_gates` or `target_unitary` exist in `target_qubits` or
in the whole circuit when they are not given.
Examples:
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ)
T : |0|1|2|
q0 : -X-Z-C-
|
q1 : -Y-X-X-
T : |0|1|2|
>>> noise = Noise.Depolarizing(probability=0.1)
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ.apply_gate_noise(noise, target_gates = Gate.X))
T : | 0 | 1 |2|
q0 : -X-DEPO(0.1)-Z-----------C-
|
q1 : -Y-----------X-DEPO(0.1)-X-
T : | 0 | 1 |2|
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ.apply_gate_noise(noise, target_qubits = 1))
T : | 0 | 1 | 2 |
q0 : -X-----------Z-----------C-----------
|
q1 : -Y-DEPO(0.1)-X-DEPO(0.1)-X-DEPO(0.1)-
T : | 0 | 1 | 2 |
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ.apply_gate_noise(noise,
... target_gates = [Gate.X,Gate.Y],
... target_qubits = [0,1])
... )
T : | 0 | 1 |2|
q0 : -X-DEPO(0.1)-Z-----------C-
|
q1 : -Y-DEPO(0.1)-X-DEPO(0.1)-X-
T : | 0 | 1 |2|
"""
# check whether gate noise is applied to an empty circuit
if not self.qubits:
raise IndexError("Gate noise cannot be applied to an empty circuit.")
# check if target_gates and target_unitary are both given
if (target_unitary is not None) and (target_gates is not None):
raise ValueError("target_unitary and target_gates cannot be input at the same time.")
# check target_qubits
target_qubits = check_noise_target_qubits(self, target_qubits)
if not all(qubit in self.qubits for qubit in target_qubits):
raise IndexError("target_qubits must be within the range of the current circuit.")
# make noise a list
noise = wrap_with_list(noise)
# make target_gates a list
if target_gates is not None:
target_gates = wrap_with_list(target_gates)
# remove duplicate items
target_gates = list(dict.fromkeys(target_gates))
for noise_channel in noise:
if not isinstance(noise_channel, Noise):
raise TypeError("Noise must be an instance of the Noise class")
# check whether target_gates is valid
if target_gates is not None:
check_noise_target_gates(noise_channel, target_gates)
if target_unitary is not None:
check_noise_target_unitary(noise_channel, target_unitary)
if target_unitary is not None:
return apply_noise_to_gates(self, noise, target_unitary, target_qubits)
else:
return apply_noise_to_gates(self, noise, target_gates, target_qubits)
def apply_initialization_noise(
self,
noise: Union[Type[Noise], Iterable[Type[Noise]]],
target_qubits: Optional[QubitSetInput] = None,
) -> Circuit:
"""Apply `noise` at the beginning of the circuit for every qubit (default) or target_qubits`.
Only when `target_qubits` is given can the noise be applied to an empty circuit.
When `noise.qubit_count` > 1, the number of qubits in target_qubits must be equal
to `noise.qubit_count`.
Args:
noise (Union[Type[Noise], Iterable[Type[Noise]]]): Noise channel(s) to be applied
to the circuit.
target_qubits (Union[QubitSetInput, optional]): Index or indices of qubit(s).
Default=None.
Returns:
Circuit: self
Raises:
TypeError:
If `noise` is not Noise type.
If `target_qubits` has non-integers or negative integers.
IndexError:
If applying noise to an empty circuit when `target_qubits` is not given.
ValueError:
If `noise.qubit_count` > 1 and the number of qubits in target_qubits is
not the same as `noise.qubit_count`.
Examples:
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ)
>>> noise = Noise.Depolarizing(probability=0.1)
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ.apply_initialization_noise(noise))
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ.apply_initialization_noise(noise, target_qubits = 1))
>>> circ = Circuit()
>>> print(circ.apply_initialization_noise(noise, target_qubits = [0, 1]))
"""
if (len(self.qubits) == 0) and (target_qubits is None):
raise IndexError(
"target_qubits must be provided in order to"
" apply the initialization noise to an empty circuit."
)
target_qubits = check_noise_target_qubits(self, target_qubits)
# make noise a list
noise = wrap_with_list(noise)
for noise_channel in noise:
if not isinstance(noise_channel, Noise):
raise TypeError("Noise must be an instance of the Noise class")
if noise_channel.qubit_count > 1 and noise_channel.qubit_count != len(target_qubits):
raise ValueError(
"target_qubits needs to be provided for this multi-qubit noise channel,"
" and the number of qubits in target_qubits must be the same as defined by"
" the multi-qubit noise channel."
)
return apply_noise_to_moments(self, noise, target_qubits, "initialization")
def apply_readout_noise(
self,
noise: Union[Type[Noise], Iterable[Type[Noise]]],
target_qubits: Optional[QubitSetInput] = None,
) -> Circuit:
"""Apply `noise` right before measurement in every qubit (default) or target_qubits`.
Only when `target_qubits` is given can the noise be applied to an empty circuit.
When `noise.qubit_count` > 1, the number of qubits in target_qubits must be equal
to `noise.qubit_count`.
Args:
noise (Union[Type[Noise], Iterable[Type[Noise]]]): Noise channel(s) to be applied
to the circuit.
target_qubits (Union[QubitSetInput, optional]): Index or indices of qubit(s).
Default=None.
Returns:
Circuit: self
Raises:
TypeError:
If `noise` is not Noise type.
If `target_qubits` has non-integers.
IndexError:
If applying noise to an empty circuit.
ValueError:
If `target_qubits` has negative integers.
If `noise.qubit_count` > 1 and the number of qubits in target_qubits is
not the same as `noise.qubit_count`.
Examples:
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
>>> print(circ)
>>> noise = Noise.Depolarizing(probability=0.1)
>>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
            >>> print(circ.apply_readout_noise(noise))
            >>> circ = Circuit().x(0).y(1).z(0).x(1).cnot(0,1)
            >>> print(circ.apply_readout_noise(noise, target_qubits = 1))
            >>> circ = Circuit()
            >>> print(circ.apply_readout_noise(noise, target_qubits = [0, 1]))
"""
if (len(self.qubits) == 0) and (target_qubits is None):
raise IndexError(
"target_qubits must be provided in order to"
" apply the readout noise to an empty circuit."
)
if target_qubits is None:
target_qubits = self.qubits
else:
if not isinstance(target_qubits, list):
target_qubits = [target_qubits]
if not all(isinstance(q, int) for q in target_qubits):
raise TypeError("target_qubits must be integer(s)")
if not all(q >= 0 for q in target_qubits):
raise ValueError("target_qubits must contain only non-negative integers.")
target_qubits = QubitSet(target_qubits)
# make noise a list
noise = wrap_with_list(noise)
for noise_channel in noise:
if not isinstance(noise_channel, Noise):
raise TypeError("Noise must be an instance of the Noise class")
if noise_channel.qubit_count > 1 and noise_channel.qubit_count != len(target_qubits):
raise ValueError(
"target_qubits needs to be provided for this multi-qubit noise channel,"
" and the number of qubits in target_qubits must be the same as defined by"
" the multi-qubit noise channel."
)
return apply_noise_to_moments(self, noise, target_qubits, "readout")
def add(self, addable: AddableTypes, *args, **kwargs) -> Circuit:
"""
Generic add method for adding item(s) to self. Any arguments that
`add_circuit()` and / or `add_instruction()` and / or `add_result_type`
supports are supported by this method. If adding a
subroutine, check with that subroutines documentation to determine what
input it allows.
Args:
addable (AddableTypes): The item(s) to add to self. Default = `None`.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
Circuit: self
Raises:
TypeError: If `addable` is an unsupported type
See Also:
`add_circuit()`
`add_instruction()`
`add_result_type()`
Examples:
>>> circ = Circuit().add([Instruction(Gate.H(), 4), Instruction(Gate.CNot(), [4, 5])])
>>> circ = Circuit().add([ResultType.StateVector()])
>>> circ = Circuit().h(4).cnot([4, 5])
>>> @circuit.subroutine()
>>> def bell_pair(target):
... return Circuit().h(target[0]).cnot(target[0: 2])
...
>>> circ = Circuit().add(bell_pair, [4,5])
"""
def _flatten(addable):
if isinstance(addable, Iterable):
for item in addable:
yield from _flatten(item)
else:
yield addable
for item in _flatten(addable):
if isinstance(item, Instruction):
self.add_instruction(item, *args, **kwargs)
elif isinstance(item, ResultType):
self.add_result_type(item, *args, **kwargs)
elif isinstance(item, Circuit):
self.add_circuit(item, *args, **kwargs)
elif callable(item):
self.add(item(*args, **kwargs))
else:
raise TypeError(f"Cannot add a '{type(item)}' to a Circuit")
return self
def diagram(self, circuit_diagram_class=AsciiCircuitDiagram) -> str:
"""
Get a diagram for the current circuit.
Args:
circuit_diagram_class (Class, optional): A `CircuitDiagram` class that builds the
diagram for this circuit. Default = `AsciiCircuitDiagram`.
Returns:
str: An ASCII string circuit diagram.
"""
return circuit_diagram_class.build_diagram(self)
def to_ir(self) -> Program:
"""
Converts the circuit into the canonical intermediate representation.
If the circuit is sent over the wire, this method is called before it is sent.
Returns:
(Program): An AWS quantum circuit description program in JSON format.
"""
ir_instructions = [instr.to_ir() for instr in self.instructions]
ir_results = [result_type.to_ir() for result_type in self.result_types]
ir_basis_rotation_instructions = [
instr.to_ir() for instr in self.basis_rotation_instructions
]
return Program.construct(
instructions=ir_instructions,
results=ir_results,
basis_rotation_instructions=ir_basis_rotation_instructions,
)
def as_unitary(self) -> np.ndarray:
"""
Returns the unitary matrix representation of the entire circuit.
*Note*: The performance of this method degrades with qubit count. It might be slow for
qubit count > 10.
Returns:
np.ndarray: A numpy array with shape (2^qubit_count, 2^qubit_count) representing the
circuit as a unitary. *Note*: For an empty circuit, an empty numpy array is
returned (`array([], dtype=complex128)`)
Raises:
TypeError: If circuit is not composed only of `Gate` instances,
i.e. a circuit with `Noise` operators will raise this error.
Examples:
>>> circ = Circuit().h(0).cnot(0, 1)
>>> circ.as_unitary()
array([[ 0.70710678+0.j, 0.70710678+0.j, 0. +0.j,
0. +0.j],
[ 0. +0.j, 0. +0.j, 0.70710678+0.j,
-0.70710678+0.j],
[ 0. +0.j, 0. +0.j, 0.70710678+0.j,
0.70710678+0.j],
[ 0.70710678+0.j, -0.70710678+0.j, 0. +0.j,
0. +0.j]])
"""
qubits = self.qubits
if not qubits:
return np.zeros(0, dtype=complex)
qubit_count = max(qubits) + 1
return calculate_unitary(qubit_count, self.instructions)
@property
def qubits_frozen(self) -> bool:
"""bool: Whether the circuit's qubits are frozen, that is, cannot be remapped.
        This may happen because the circuit contains compiler directives that prevent
        compilation of part of the circuit, which in turn means that no other qubits
        can be remapped without changing the meaning of the program.
"""
return self._has_compiler_directives
@property
def observables_simultaneously_measurable(self) -> bool:
"""bool: Whether the circuit's observables are simultaneously measurable
If this is False, then the circuit can only be run when shots = 0, as sampling (shots > 0)
measures the circuit in the observables' shared eigenbasis.
"""
return self._observables_simultaneously_measurable
def _encounter_noncommuting_observable(self):
self._observables_simultaneously_measurable = False
# No longer simultaneously measurable, so no need to track
self._qubit_observable_mapping.clear()
self._qubit_observable_target_mapping.clear()
def _copy(self) -> Circuit:
copy = Circuit().add(self.instructions)
copy.add(self.result_types)
return copy
def copy(self) -> Circuit:
"""
Return a shallow copy of the circuit.
Returns:
Circuit: A shallow copy of the circuit.
"""
return self._copy()
def __iadd__(self, addable: AddableTypes) -> Circuit:
return self.add(addable)
def __add__(self, addable: AddableTypes) -> Circuit:
new = self._copy()
new.add(addable)
return new
def __repr__(self) -> str:
if not self.result_types:
return f"Circuit('instructions': {list(self.instructions)})"
else:
return (
f"Circuit('instructions': {list(self.instructions)}"
+ f", 'result_types': {self.result_types})"
)
def __str__(self):
return self.diagram(AsciiCircuitDiagram)
def __eq__(self, other):
if isinstance(other, Circuit):
return (
list(self.instructions) == list(other.instructions)
and self.result_types == other.result_types
)
return NotImplemented
def subroutine(register=False):
"""
Subroutine is a function that returns instructions, result types, or circuits.
Args:
register (bool, optional): If `True`, adds this subroutine into the `Circuit` class.
Default = `False`.
Examples:
>>> @circuit.subroutine(register=True)
>>> def bell_circuit():
... return Circuit().h(0).cnot(0, 1)
...
>>> circ = Circuit().bell_circuit()
>>> for instr in circ.instructions:
... print(instr)
...
Instruction('operator': 'H', 'target': QubitSet(Qubit(0),))
            Instruction('operator': 'CNOT', 'target': QubitSet(Qubit(0), Qubit(1)))
"""
def subroutine_function_wrapper(func: Callable[..., SubroutineReturn]) -> SubroutineReturn:
if register:
Circuit.register_subroutine(func)
return func
return subroutine_function_wrapper
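# Usage sketch (illustrative only, relying on the classes used in the docstrings above):
# build a small circuit, attach depolarizing noise after every gate, and serialize it to IR.
# Kept as comments so the module's import-time behavior is unchanged.
#   circ = Circuit().h(0).cnot(0, 1)
#   circ.apply_gate_noise(Noise.Depolarizing(probability=0.05))
#   program = circ.to_ir()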
|
#!/usr/bin/python
import sys
def gcd(a,b):
"""Compute the greatest common divisor of a and b"""
while b > 0:
a, b = b, a % b
return a
def lcm(a, b):
"""Compute the lowest common multiple of a and b"""
    return a * b // gcd(a, b)  # integer division keeps the result exact on Python 3
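# Quick illustration of the two helpers above (values chosen arbitrarily):
#   gcd(12, 18) == 6
#   lcm(4, 6) == 12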
class Place(object):
def __init__(self, name, popularity, visits):
self.name = name
self.popularity = popularity
self.visits = visits
def __lt__(self, other):
return (self.visits, self.popularity) < (other.visits, other.popularity)
def __repr__(self):
return self.name
def simulate(S, R, K, N):
    for i in range(0, R):
        for j in range(0, N):
S[j].visits += 1
S = S[N:] + S[0:N]
return S
def run(filename = 'sample1.in'):
with open(filename) as f:
T = int(f.readline().replace('\n', ''))
        for i in range(0, T):
K, N, V = [int(n) for n in f.readline().replace('\n', '').split(' ')]
            S = [Place(f.readline().replace('\n', ''), n, 0) for n in range(0, K)]
LCM = lcm(K, N)
            C = LCM // N
R = (V) % C
#print K, N, V, S, C, LCM, R
S = simulate(S, R, K, N)
print("Case #{}: {}".format(i+1, " ".join([p.name for p in sorted(S[K-N:], key=lambda x: x.popularity)])))
if __name__ == '__main__':
filename = None
if len(sys.argv) == 2:
run(sys.argv[1])
else:
run()
|
import numpy as np
from numpy.testing import assert_allclose
from openamundsen.tridiag import solve_tridiag
from scipy.sparse import diags
def test_solve_tridiag():
N = 100
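    # Band convention assumed by solve_tridiag (as used below): `a` is the sub-diagonal
    # padded with a leading 0, `b` the main diagonal, and `c` the super-diagonal padded
    # with a trailing 0, so all three vectors have length N.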
a = np.append([0], np.random.rand(N - 1))
b = np.random.rand(N)
c = np.append(np.random.rand(N - 1), [0])
x = np.random.rand(N)
A = diags([a[1:], b, c[:-1]], [-1, 0, 1])
d = A @ x
x_solved = solve_tridiag(a, b, c, d)
assert_allclose(x_solved, x)
|
"""
Parsing ASN1_flat format files from dbSNP
Currently not used for loading dbSNP data; dbsnp_vcf_parser is used instead.
Chunlei Wu
"""
from __future__ import print_function
import re
import glob
import os.path
from utils.common import anyfile
from utils.dataload import rec_handler
assembly_d = {
'GRCh38': 'hg38',
'GRCh37': 'hg19'
}
loctype_d = {
'1': 'insertion',
'2': 'exact',
'3': 'deletion',
'4': 'range-insertion',
'5': 'range-exact',
'6': 'range-deletion'
}
class ParsingError(Exception):
pass
class dbSNPASN1FlatParser:
'''Parsing dbSNP ASN1_flat file.'''
def __init__(self, verbose=True):
self.verbose = verbose
def parse(self, infile):
print(os.path.split(infile)[1])
cnt = 0
err_d = {}
_f = anyfile(infile)
ff = rec_handler(_f)
for rec in ff:
if not rec.startswith('rs'):
continue
doc = self.parse_one_record(rec)
if isinstance(doc, dict):
cnt += 1
yield doc
else:
if doc in err_d:
err_d[doc] += 1
else:
err_d[doc] = 1
print(cnt, err_d)
def test(self, infile):
_f = anyfile(infile)
ff = rec_handler(_f)
gd = []
err_cnt = 0
for rec in ff:
if not rec.startswith('rs'):
continue
lines = rec.strip().split('\n')
self._parse_rsline(lines)
d = self._parse_GMAF(lines)
if not d:
err_cnt += 1
gd.append(d)
print(err_cnt)
return gd
def parse_one_record(self, record):
snp_d = {}
lines = record.strip().split('\n')
snp_d.update(self._parse_rsline(lines) or {})
snp_d.update(self._parse_SNP(lines) or {})
snp_d.update(self._parse_VAL(lines) or {})
snp_d.update(self._parse_GMAF(lines) or {})
snp_d.update(self._parse_CTG(lines) or {})
return snp_d
def _parse_rsline(self, rec_lines):
'''parsing RS line'''
snp_d = {}
rs_line = rec_lines[0].split(' | ')
rsid = rs_line[0]
self.current_rsid = rsid
snp_d['rsid'] = rsid
        assert re.match(r'rs\d+', rsid)
snp_d['snpclass'] = rs_line[3]
snp_d['genotype'] = rs_line[4] == 'YES'
return snp_d
def _parse_SNP(self, rec_lines):
'''Parsing SNP line from one ASN1_Flat record.'''
snp_d = {}
snp_line = [line for line in rec_lines if line.startswith('SNP')][0]
snp_line = [x.split('=') for x in snp_line.split(' | ')]
allele_li = snp_line[1][1].strip("'").split('/')
if len(allele_li) == 2:
allele1, allele2 = allele_li
snp_d.update({
'allele1': allele1,
'allele2': allele2
})
else:
# here we ignore those with > 2 alleles
if self.verbose:
print(self.current_rsid, snp_line[1][1])
return # -1
if snp_line[2][1] != '?':
het = float(snp_line[2][1])
het_se = float(snp_line[3][1])
snp_d['het'] = {
'value': het,
'se': het_se
}
if len(snp_line) == 5:
allele_origin = snp_line[4][1]
# pat = "(.+)\((.+)\)/(.+)\((.+)\)"
# grp = re.match(pat, allele_orgin).groups()
# assert len(grp) == 4
pat = "(.*)\((.+)\)"
d = []
for x in allele_origin.split('/'):
mat = re.match(pat, x)
if mat:
d.append(mat.groups())
else:
if self.verbose:
print(self.current_rsid, allele_origin)
return # -2
d = dict(d)
if '' in d:
d['-'] = d['']
del d['']
snp_d['allele_origin'] = d
return snp_d
def _parse_VAL(self, rec_lines):
"""parsing VAL line, should have one line in one record."""
snp_d = {}
val_line = [line for line in rec_lines if line.startswith('VAL')][0]
k, v = val_line.split(' | ')[1].split('=')
assert k == 'validated'
assert (v == 'YES' or v == 'NO')
snp_d['validated'] = v == 'YES'
return snp_d
def _parse_GMAF(self, rec_lines):
'''GMAF line is optional, and can be multi-lines.'''
snp_d = {}
gmaf_lines = [line for line in rec_lines if line.startswith('GMAF')]
if len(gmaf_lines) > 0:
_gmaf = []
for gmaf_line in gmaf_lines:
gmaf_line = [x.split('=') for x in gmaf_line.split(' | ')]
_gmaf.append({
"allele": gmaf_line[1][1],
"count": gmaf_line[2][1],
"freq": gmaf_line[3][1]
})
snp_d['gmaf'] = _gmaf
return snp_d
def _parse_CTG(self, rec_lines):
'''parsing CTG lines, can have multiple lines'''
snp_d = {}
ctg_line = [line for line in rec_lines if line.startswith('CTG')]
if ctg_line:
ctg_line = ctg_line[0]
else:
if self.verbose:
print(self.current_rsid, "missing CTG line")
return # -4
ctg_line = [x.split('=') for x in ctg_line.split(' | ')]
assembly_key = assembly_d[ctg_line[1][1].split('.')[0]]
chrom = ctg_line[2][1]
pos = ctg_line[3][1]
if pos != '?':
pos = int(pos)
else:
if self.verbose:
print(self.current_rsid, 'pos=?')
return # -3
pos_d = {
assembly_key: {
'start': pos,
'end': pos + 1
}
}
ctg_d = {
'contig': {
'accession': ctg_line[4][0],
'start': int(ctg_line[5][1]),
'end': int(ctg_line[6][1])
}
}
loctype = loctype_d[ctg_line[7][1]]
strand = ctg_line[8][1]
snp_d.update(pos_d)
snp_d.update(ctg_d)
snp_d.update({
'chrom': chrom,
'strand': strand,
'loctype': loctype
})
return snp_d
def load_data(path):
parser = dbSNPASN1FlatParser()
for fn in glob.glob(path):
print(os.path.split(fn)[1])
for doc in parser.parse(fn):
yield doc
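if __name__ == '__main__':
    # Minimal usage sketch: the glob pattern passed on the command line is hypothetical;
    # each yielded `doc` is a plain dict built by parse_one_record (rsid, alleles, position, ...).
    import sys
    if len(sys.argv) > 1:
        for doc in load_data(sys.argv[1]):
            print(doc.get('rsid'), doc.get('chrom'), doc.get('loctype'))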
|
# -*- coding: utf-8 -*-
"""
1985. Find the Kth Largest Integer in the Array
https://leetcode.com/problems/find-the-kth-largest-integer-in-the-array/
Example 1:
Input: nums = ["3","6","7","10"], k = 4
Output: "3"
Explanation:
The numbers in nums sorted in non-decreasing order are ["3","6","7","10"].
The 4th largest integer in nums is "3".
Example 2:
Input: nums = ["2","21","12","1"], k = 3
Output: "2"
Explanation:
The numbers in nums sorted in non-decreasing order are ["1","2","12","21"].
The 3rd largest integer in nums is "2".
Example 3:
Input: nums = ["0","0"], k = 2
Output: "0"
Explanation:
The numbers in nums sorted in non-decreasing order are ["0","0"].
The 2nd largest integer in nums is "0".
"""
from typing import List
class Solution:
def kthLargestNumber(self, nums: List[str], k: int) -> str:
"""
TC: O(NlogN) / SC: O(N)
"""
return str(sorted(map(int, nums), reverse=True)[k - 1])
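if __name__ == "__main__":
    # Sanity check against the documented examples (sketch, not part of the original solution).
    solution = Solution()
    assert solution.kthLargestNumber(["3", "6", "7", "10"], 4) == "3"
    assert solution.kthLargestNumber(["2", "21", "12", "1"], 3) == "2"
    assert solution.kthLargestNumber(["0", "0"], 2) == "0"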
|
"""
file helpers
~~~~~~~~~~~~~
A set of various filesystem helpers.
:copyright: (c) 2016 by Dusty Gamble.
:license: MIT, see LICENSE for more details.
"""
__version__ = '1.0.2'
import os
import shutil
import hashlib
from natsort import natsorted
def file_extension(filepath):
filename, file_extension = os.path.splitext(filepath)
return file_extension
def absolute_delete(filepath):
try:
os.remove(filepath)
except OSError:
try:
shutil.rmtree(filepath, ignore_errors=True)
except OSError:
pass
def get_dir_contents_filepaths(dirname):
files = []
dirname += '/'
try:
for filename in os.listdir(dirname):
filepath = os.path.normpath(dirname + filename)
files.append(filepath)
except OSError:
pass
return files
def delete_dir_extra_files(dirname, needs_files):
has_files = [os.path.normpath(f) for f in get_dir_contents_filepaths(dirname)]
needs_files = [os.path.normpath(f) for f in needs_files]
extra_files = set(has_files) - set(needs_files)
for filepath in extra_files:
absolute_delete(filepath)
return extra_files
def get_dir_symlinks(dirname, recursive=False):
# Reference: http://stackoverflow.com/questions/6184849
symlinks = {}
    for filepath in get_dir_contents_filepaths(dirname):
try:
if os.path.islink(filepath):
symlinks[filepath] = os.path.realpath(filepath)
except OSError:
# If the file was deleted before we inspected it, don't bother.
pass
return symlinks
def md5_file(filepath):
hash_md5 = hashlib.md5()
with open(filepath, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def get_subdirs(root_dirpath):
dirs = []
try:
for filename in os.listdir(root_dirpath):
filepath = root_dirpath + filename
if os.path.isdir(filepath):
dirs.append(filepath)
dirs = natsorted(dirs)
except FileNotFoundError:
pass
return dirs
def get_dir_files(dirname, *, extensions=None):
"""
:param str dirname: path to directory
:param list extensions: list of extensions to look for
:return: a list of filepaths
"""
filepaths = []
try:
for filename in os.listdir(dirname):
if extensions and os.path.splitext(filename)[1].lower() not in extensions:
continue
filepath = os.path.join(dirname, filename)
filepaths.append(filepath)
filepaths = natsorted(filepaths)
except FileNotFoundError:
pass
return filepaths
def least_common_directory(files):
    '''
    Finds the closest directory that all files share.
    '''
    # Sketch implementation (assumes a non-empty list of file paths):
    # the deepest directory shared by all of the files' parent directories.
    return os.path.commonpath([os.path.dirname(os.path.abspath(f)) for f in files])
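if __name__ == '__main__':
    # Small demonstration sketch (operates only on this file and the current directory,
    # so it is safe to run): hash this module and list naturally sorted subdirectories.
    print(md5_file(__file__))
    print(get_subdirs('./'))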
|
import os
import configparser
class Config(object):
def __init__(self, filename):
self.__conf = configparser.ConfigParser()
self.__conf.read(filename, encoding='GBK')
def getConfig(self, section, item):
try:
itemDict = dict(self.__conf.items(section))
if item in itemDict:
return itemDict[item]
return None
        except Exception:
            return None
if __name__ == '__main__':
conf = Config(os.getcwd() + '/conf/conf.ini')
print(conf.getConfig('APP', 'name'))
print(conf.getConfig('USER', 'name'))
print(conf.getConfig('TITLE', 'title1'))
print(conf.getConfig('TITLE', 'title2'))
print(conf.getConfig('TITLE', 'title3'))
|
# Author: OMKAR PATHAK
# This program illustrates a simple Python encryption example using the RSA Algorithm
# RSA is an algorithm used by modern computers to encrypt and decrypt messages. It is an asymmetric
# cryptographic algorithm. Asymmetric means that there are two different keys (public and private).
# For installation: sudo pip3 install pycrypto
from Crypto.PublicKey import RSA
from Crypto import Random
randomGenerator = Random.new().read
# Generating a private key and a public key
# key stores both the keys
key = RSA.generate(1024, randomGenerator) # 1024 is the size of the key in bits
print(key) # Prints private key
print(key.publickey()) # Prints public key
# Encryption using Public Key
publicKey = key.publickey()
encryptedData = publicKey.encrypt('My name is Omkar Pathak'.encode('utf-8'), 32)
print(encryptedData)
# Decryption using Private Key
decryptedData = key.decrypt(encryptedData)
print(decryptedData)
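# Hedged alternative sketch: the maintained fork of pycrypto (pycryptodome) removes
# key.encrypt()/key.decrypt(); RSA encryption there goes through a padding scheme such
# as PKCS1_OAEP. Left commented out so only the original flow above executes.
# from Crypto.Cipher import PKCS1_OAEP
# oaepCiphertext = PKCS1_OAEP.new(publicKey).encrypt('My name is Omkar Pathak'.encode('utf-8'))
# print(PKCS1_OAEP.new(key).decrypt(oaepCiphertext))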
|
#This is a template code. Please save it in a proper .py file.
import rtmaps.types
import numpy as np
from rtmaps.base_component import BaseComponent # base class
from sklearn.utils import shuffle
import cv2
import pickle
import os
import random
import matplotlib.pyplot as plt
import tensorflow as tf
import pandas as pd
from time import time
import matplotlib.image as mpimg
import csv
import sys
# Python class that will be called from RTMaps.
class rtmaps_python(BaseComponent):
def __init__(self):
BaseComponent.__init__(self) # call base class constructor
#self.add_input("in", rtmaps.types.ANY) # define input
#self.add_output("out", rtmaps.types.AUTO) # define output
# Birth() will be called once at diagram execution startup
def Birth(self):
print("Python Birth")
# Core() is called every time you have a new input
def Core(self):
#out = self.inputs["in"].ioelt # create an ioelt from the input
#self.outputs["out"].write(out) # and write it to the output
training_file = '/home/bluebox/akash/data/train.p'
#
testing_file = '/home/bluebox/akash/data/test.p'
#
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
#
X_train, y_train = train['features'], train['labels']
n_classes = len(np.unique(y_train))
def Model(x):
mu = 0
sigma = 0.1
keep_prob = 0.9
strides_conv = [1, 1, 1, 1]
strides_pool = [1, 2, 2, 1]
#________________________________Layer 1__________________________________________________
# Convolutional. Input = 32x32x1. Filter = 5x5x1. Output = 28x28x6.
# Arguments used for tf.truncated_normal, randomly defines variables for the weights and biases for each layer
conv1_W = tf.Variable(tf.random_normal(shape=(5, 5, 1, 6), mean = mu, stddev = sigma))
conv1_b = tf.Variable(tf.zeros(6))
conv1 = tf.nn.conv2d(x, conv1_W, strides=strides_conv, padding='VALID') + conv1_b
# Apply activation function
conv1 = tf.nn.relu(conv1)
#conv1 = tf.nn.dropout(conv1, keep_prob)
#________________________________Layer 2__________________________________________________
# Convolutional. Input = 28x28x6. Filter = 3x3x6. Output = 14x14x6.
conv2_W = tf.Variable(tf.random_normal(shape=(3, 3, 6, 12), mean = mu, stddev = sigma))
conv2_b = tf.Variable(tf.zeros(12))
conv2 = tf.nn.conv2d(conv1, conv2_W, strides=strides_conv, padding='SAME') + conv2_b
# Pooling. Input = 28x28x6. Output = 14x14x6.
conv2 = tf.nn.max_pool(conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
#________________________________Layer 3__________________________________________________
# Convolutional. Input = 14x14x6. Filter = 5x5x12. Output = 10x10x16.
conv3_W = tf.Variable(tf.truncated_normal(shape=(5, 5, 12, 16), mean = mu, stddev = sigma))
conv3_b = tf.Variable(tf.zeros(16))
conv3 = tf.nn.conv2d(conv2, conv3_W, strides=strides_conv, padding='VALID') + conv3_b
# Apply activation function
conv3 = tf.nn.relu(conv3)
# Pooling. Input = 10x10x16. Output = 5x5x16.
conv3 = tf.nn.max_pool(conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
#________________________________Layer 4__________________________________________________
# Flatten. Input = 5x5x16. Output = 400.
fc0 = tf.reshape(conv3, [-1, int(5*5*16)])
fc0 = tf.nn.dropout(fc0, keep_prob)
# Fully Connected. Input = 400. Output = 120.
fc1_W = tf.Variable(tf.truncated_normal(shape=(400, 120), mean = mu, stddev = sigma))
fc1_b = tf.Variable(tf.zeros(120))
fc1 = tf.matmul(fc0, fc1_W) + fc1_b
# Apply activation function
fc1 = tf.nn.relu(fc1)
fc1 = tf.nn.dropout(fc1, keep_prob)
#________________________________Layer 5__________________________________________________
# Fully Connected. Input = 120. Output = 84.
fc2_W = tf.Variable(tf.truncated_normal(shape=(120, 84), mean = mu, stddev = sigma))
fc2_b = tf.Variable(tf.zeros(84))
fc2 = tf.matmul(fc1, fc2_W) + fc2_b
# Apply activation function
fc2 = tf.nn.relu(fc2)
fc2 = tf.nn.dropout(fc2, keep_prob)
#________________________________Layer 6__________________________________________________
# Fully Connected. Input = 84. Output = 43.
fc3_W = tf.Variable(tf.truncated_normal(shape=(84, n_classes), mean = mu, stddev = sigma))
fc3_b = tf.Variable(tf.zeros(n_classes))
logits = tf.matmul(fc2, fc3_W) + fc3_b
return logits
x = tf.placeholder(tf.float32, (None, 32, 32, 1))
y = tf.placeholder(tf.int32, (None))
one_hot_y = tf.one_hot(y, n_classes)
training_results = pd.read_csv('/home/bluebox/akash/accuracy_per_epoch.csv')
logits = Model(x)
images = []
images_orig = []
folder = "/home/bluebox/akash/new_images/"
for image_name in os.listdir(folder):
#reading in an image and resize it
image = mpimg.imread(folder + image_name)
images_orig.append(image)
image = cv2.imread(folder + image_name,0)
image = cv2.resize(image, (32,32))
image = image/255
images.append(image)
X_data = np.asarray(images)
X_data = X_data.reshape((len(images),32,32,1))
print ("New images after reshape: ", X_data.shape)
signs=[]
with open('/home/bluebox/akash/signnames.csv', 'rt') as csvfile:
reader = csv.DictReader(csvfile, delimiter=',')
for row in reader:
signs.append((row['SignName']))
with tf.Session() as sess:
sess = tf.get_default_session()
tf.train.Saver().restore(sess, tf.train.latest_checkpoint('/home/bluebox/akash/.'))
signs_classes = sess.run(tf.argmax(logits, 1), feed_dict={x: X_data})
for i in range(len(images)):
print(signs[signs_classes[i]])
plt.axis('off')
sess.close()
# Death() will be called once at diagram execution shutdown
def Death(self):
pass
|
import frappe
from frappe.utils import getdate, cint, cstr, random_string, now_datetime
import datetime
def response(message, status_code, data=None, error=None):
"""This method generates a response for an API call with appropriate data and status code.
Args:
message (str): Message to be shown depending upon API result. Eg: Success/Error/Forbidden/Bad Request.
status_code (int): Status code of API response.
        data (Any, optional): Any data to be passed as response (Dict, List, etc). Defaults to None.
        error (Any, optional): Error details to be passed in the response instead of `data`. Defaults to None.
"""
frappe.local.response["message"] = message
frappe.local.response["http_status_code"] = status_code
frappe.local.response["status_code"] = status_code
if data:
frappe.local.response["data"] = data
elif error:
frappe.local.response["error"] = error
return
@frappe.whitelist()
def get_current_user_details():
user = frappe.session.user
user_roles = frappe.get_roles(user)
user_employee = frappe.get_value("Employee", {"user_id": user}, ["name", "employee_id", "employee_name", "image", "enrolled", "designation"], as_dict=1)
return user, user_roles, user_employee
def setup_directories():
"""
Use this method to create directories needed for the face recognition system: dataset directory and facial embeddings
"""
from pathlib import Path
Path(frappe.utils.cstr(frappe.local.site)+"/private/files/user/").mkdir(parents=True, exist_ok=True)
Path(frappe.utils.cstr(frappe.local.site)+"/private/files/dataset/").mkdir(parents=True, exist_ok=True)
Path(frappe.utils.cstr(frappe.local.site)+"/private/files/facial_recognition/").mkdir(parents=True, exist_ok=True)
Path(frappe.utils.cstr(frappe.local.site)+"/private/files/face_rec_temp/").mkdir(parents=True, exist_ok=True)
Path(frappe.utils.cstr(frappe.local.site)+"/private/files/dataset/"+frappe.session.user+"/").mkdir(parents=True, exist_ok=True)
def validate_date(date: str) -> bool:
"""This method validates a date to be in yyyy-mm-dd format.
Args:
date (str): date string
Returns:
bool: True/False based on valid date string
"""
if "-" not in date:
return False
date_elements = date.split("-")
if len(date_elements) != 3:
return False
year = date_elements[0]
month = date_elements[1]
day = date_elements[2]
if len(year) != 4:
return False
if len(month) > 2:
return False
if int(month) > 12 or int(month) < 1:
return False
if len(day) > 2:
return False
if int(day) > 31 or int(day) < 1:
return False
return True
def validate_time(time: str) -> bool:
"""This method validates time to be in format hh:mm:ss
Args:
time (str): time string.
Returns:
bool: True/False based on valid time string
"""
if ":" not in time:
return False
time_elements = time.split(":")
if len(time_elements) != 3:
return False
hour = time_elements[0]
minutes = time_elements[1]
seconds = time_elements[2]
    if len(hour) != 2 or len(minutes) != 2 or len(seconds) != 2:
return False
if int(hour) > 23 or int(hour) < 0:
return False
if int(minutes) > 59 or int(minutes) < 0:
return False
if int(seconds) > 59 or int(seconds) < 0:
return False
return True
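# Illustrative checks for the two validators above (values chosen arbitrarily):
#   validate_date("2023-07-14") -> True     validate_date("14/07/2023") -> False
#   validate_time("08:30:00")   -> True     validate_time("8:30")       -> False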
@frappe.whitelist()
def get_current_shift(employee):
try:
current_datetime = now_datetime().strftime("%Y-%m-%d %H:%M:%S")
date, time = current_datetime.split(" ")
shifts = frappe.get_list("Shift Assignment", {"employee":employee, 'start_date': ['>=', date]}, ["shift", "shift_type"])
if len(shifts) > 0:
for shift in shifts:
time = time.split(":")
time = datetime.timedelta(hours=cint(time[0]), minutes=cint(time[1]), seconds=cint(time[2]))
                shift_type, start_time, end_time, before_time, after_time = frappe.get_value("Shift Type", shift.shift_type, ["shift_type", "start_time", "end_time", "begin_check_in_before_shift_start_time", "allow_check_out_after_shift_end_time"])
#include early entry and late exit time
start_time = start_time - datetime.timedelta(minutes=before_time)
end_time = end_time + datetime.timedelta(minutes=after_time)
if shift_type == "Night":
if start_time <= time >= end_time or start_time >= time <= end_time:
return shift
else:
if start_time <= time <= end_time:
return shift
elif len(shifts)==0:
return shifts
else:
return shifts[0].shift
except Exception as e:
print(frappe.get_traceback())
return frappe.utils.response.report_error(e.http_status_code)
|
import tempfile
import time
import os
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
def cap_to_file(capsys, write_to):
out = capsys.readouterr().out
with open(write_to, "a") as f:
f.write(out)
def test_barrier(capsys):
"""Verify that barrier blocks execution by printing messages before and after"""
from mpi4jax._src.flush import flush
from mpi4jax import barrier
# pipe all messages to the same file
tmpdir = tempfile.gettempdir()
write_to = os.path.join(tmpdir, "mpi4jax-barrier.txt")
if rank == 0:
with open(write_to, "w"):
pass
print(f"r{rank} | start")
time.sleep(rank * 0.2)
cap_to_file(capsys, write_to)
# without a barrier here, some ranks would start writing
# "done" before everyone has writen "start"
token = barrier() # noqa: F841
flush("cpu")
print(f"r{rank} | done")
time.sleep(rank * 0.2)
cap_to_file(capsys, write_to)
time.sleep(size * 0.2)
with open(write_to, "r") as f:
outputs = f.readlines()
assert len(outputs) == size * 2
assert all(o.endswith("start\n") for o in outputs[:size])
assert all(o.endswith("done\n") for o in outputs[size:])
|
keyboard.send_keys("<shift>+<escape>")
|
from django.db import models
import uuid
from django.contrib.auth.models import User
# When create a new model, run migration as follow:
# python3 manage.py makemigrations
# python3 manage.py migrate
class Author(models.Model):
# https://docs.djangoproject.com/en/2.1/topics/db/examples/one_to_one/
# https://simpleisbetterthancomplex.com/tutorial/2016/07/22/how-to-extend-django-user-model.html By Vitor Freitas
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.ForeignKey(User, on_delete=models.CASCADE, editable=False)
displayName = models.CharField(max_length=128)
github = models.URLField(null=True, blank=False)
host = models.URLField()
def __str__(self):
return self.displayName
class Friend(models.Model):
#https://briancaffey.github.io/2017/07/19/different-ways-to-build-friend-models-in-django.html by Brian Caffey
#https://stackoverflow.com/questions/2201598/how-to-define-two-fields-unique-as-couple answered Feb 4 '10 at 17:16 Jens, edited Jun 16 '14 at 20:50 Mark Mikofski
    friendStatusChoices = (
('Accept', 'Accept'),
('Decline', 'Decline'),
('Pending', 'Pending'),
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
author = models.ForeignKey(Author, related_name='sender',on_delete=models.CASCADE, editable=False)
friend = models.ForeignKey(Author, related_name='reciver',on_delete=models.CASCADE, editable=False)
    status = models.CharField(max_length=32, choices=friendStatusChoices, default='Pending')
last_modified_time = models.DateTimeField(auto_now_add=True, editable=False)
class Meta:
unique_together = ('author', 'friend',)
def __str__(self):
return "Friend request from %s to %s"%(self.author, self.friend)
class Post(models.Model):
# https://stackoverflow.com/questions/18676156/how-to-properly-use-the-choices-field-option-in-django answered Sep 18 '15 at 17:19 JCJS
contentTypeChoice = (
('text/markdown', 'text/markdown'),
('text/plain', 'text/plain'),
('application/base64', 'application/base64'),
('image/png;base64', 'image/png;base64'),
('image/jpeg;base64', 'image/jpeg;base64'),
)
visibilityType =(
('PUBLIC', 'PUBLIC'),
('FOAF', 'FOAF'),
('FRIENDS', 'FRIENDS'),
('PRIVATE', 'PRIVATE'),
('SERVERONLY', 'SERVERONLY'),
)
postid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(max_length=400)
source = models.URLField(null=True, blank=True)
origin = models.URLField(null=True, blank=True)
content = models.TextField()
categories = models.TextField(null=True, blank=True)
contentType = models.CharField(max_length=32, choices=contentTypeChoice)
author = models.ForeignKey(Author, related_name='post_author', on_delete=models.CASCADE)
visibility = models.CharField(max_length=32, choices=visibilityType)
visibleTo = models.TextField(null=True, blank=True)
description = models.TextField()
#https://stackoverflow.com/questions/5190313/django-booleanfield-how-to-set-the-default-value-to-true answered Mar 4 '11 at 6:29 Michael C. O'Connor
unlisted = models.BooleanField(default=False)
published = models.DateTimeField(auto_now_add=True, blank=True)
def __str__(self):
return self.title
class Comment(models.Model):
contentTypeChoice = (
('text/markdown', 'text/markdown'),
('text/plain', 'text/plain'),
('application/base64', 'application/base64'),
('image/png;base64', 'image/png;base64'),
('image/jpeg;base64', 'image/jpeg;base64'),
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
postid = models.UUIDField(default=uuid.uuid4)
author = models.ForeignKey(Author, related_name='comment_author', on_delete=models.CASCADE)
comment = models.CharField(max_length=400)
contentType = models.CharField(max_length=32, choices=contentTypeChoice)
published = models.DateTimeField(auto_now_add=True, blank=True)
def __str__(self):
return self.comment
class Node(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
host = models.CharField(max_length=400)
shareImages = models.BooleanField(default=True)
sharePost = models.BooleanField(default=True)
nodeUser = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return self.host
class RemoteUser(models.Model):
node = models.ForeignKey(Node, related_name='related_node', on_delete=models.CASCADE)
remoteUsername = models.CharField(max_length=400, null=True, blank=True)
remotePassword = models.CharField(max_length=400, null=True, blank=True)
def __str__(self):
return self.node.host
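# Usage sketch (hypothetical values, e.g. in a Django shell with migrations applied):
#   user = User.objects.create_user('alice')
#   author = Author.objects.create(user=user, displayName='Alice', host='http://example.com/')
#   Post.objects.create(title='Hello', content='Hi there', contentType='text/plain',
#                       author=author, visibility='PUBLIC', description='First post')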
|
from jinja2 import Markup
def inertia(page_data, app_id="app"):
"""Inertia view helper to render a div with page data required by client-side
Inertia.js adapter."""
return Markup("<div id='{0}' data-page='{1}'></div>".format(app_id, page_data))
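# Usage sketch (hypothetical template context): expose `inertia` to Jinja2 and call it with
# the JSON-serialized Inertia page object, e.g. {{ inertia(page_data) }}, which renders the
# mount-point <div> that the client-side adapter hydrates.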
|
import io
import json
import shutil
import zipfile
from django.core import mail
from django.test import SimpleTestCase
from rest_framework.reverse import reverse
from rest_framework.test import APIClient
from config.settings.base import OSF_TEST_USER_TOKEN
from presqt.api_v1.utilities import hash_tokens
from presqt.api_v1.utilities.fixity.download_fixity_checker import download_fixity_checker
from presqt.utilities import write_file, read_file
from presqt.targets.utilities import shared_call_get_resource_zip
class TestDownloadJobGET(SimpleTestCase):
"""
Test the `api_v1/job_status/download/` endpoint's GET method.
Testing only PresQT core code.
"""
def setUp(self):
self.client = APIClient()
self.header = {'HTTP_PRESQT_SOURCE_TOKEN': OSF_TEST_USER_TOKEN,
'HTTP_PRESQT_EMAIL_OPT_IN': ''}
self.resource_id = '5cd98510f244ec001fe5632f'
self.target_name = 'osf'
self.hashes = {
"sha256": "3e517cda95ddbfcb270ab273201517f5ae0ee1190a9c5f6f7e6662f97868366f",
"md5": "9e79fdd9032629743fca52634ecdfd86"}
self.ticket_number = hash_tokens(OSF_TEST_USER_TOKEN)
self.token = OSF_TEST_USER_TOKEN
def test_success_200_zip(self):
"""
Return a 200 along with a zip file of the resource requested.
"""
shared_call_get_resource_zip(self, self.resource_id)
url = reverse('job_status', kwargs={'action': 'download', 'response_format': 'zip'})
response = self.client.get(url, **self.header)
# Verify the status code
self.assertEqual(response.status_code, 200)
zip_file = zipfile.ZipFile(io.BytesIO(response.content))
# Verify the name of the zip file
self.assertEquals(
response._headers['content-disposition'][1],
'attachment; filename=osf_download_{}.zip'.format(self.resource_id))
# Verify content type
self.assertEqual(response._headers['content-type'][1], 'application/zip')
# Verify the number of resources in the zip is correct
self.assertEqual(len(zip_file.namelist()), 13)
# Verify the custom hash_file information is correct
with zip_file.open('osf_download_{}/fixity_info.json'.format(self.resource_id)) as fixityfile:
zip_json = json.load(fixityfile)[0]
self.assertEqual(zip_json['fixity'], True)
self.assertEqual(zip_json['fixity_details'],
'Source Hash and PresQT Calculated hash matched.')
self.assertIn(zip_json['hash_algorithm'], ['sha256', 'md5'])
self.assertEqual(zip_json['presqt_hash'], self.hashes[zip_json['hash_algorithm']])
# Run the file through the fixity checker again to make sure it downloaded correctly
with zip_file.open('osf_download_{}/data/22776439564_7edbed7e10_o.jpg'.format(self.resource_id)) as myfile:
temp_file = myfile.read()
resource_dict = {
"file": temp_file,
"hashes": self.hashes,
"title": '22776439564_7edbed7e10_o.jpg',
"path": 'osf_download_{}/data/22776439564_7edbed7e10_o.jpg'.format(self.resource_id),
"metadata": {}
}
fixity, fixity_match = download_fixity_checker(resource_dict)
self.assertEqual(fixity['fixity'], True)
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
# Ensure no email was sent for this request as no email was provided.
self.assertEqual(len(mail.outbox), 0)
def test_success_200_json(self):
"""
        Return a 200 along with the JSON status of the download job for the resource requested.
"""
self.header['HTTP_PRESQT_EMAIL_OPT_IN'] = 'test@fakeemail.com'
shared_call_get_resource_zip(self, self.resource_id)
url = reverse('job_status', kwargs={'action': 'download', 'response_format': 'json'})
response = self.client.get(url, **self.header)
# Verify the status code
self.assertEqual(response.status_code, 200)
# Verify the status code and data
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['message'], 'Download successful.')
self.assertEqual(response.data['failed_fixity'], [])
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_success_202(self):
"""
Return a 202 if the resource has not finished being prepared on the server.
"""
shared_call_get_resource_zip(self, self.resource_id)
# Update the fixity_info.json to say the resource hasn't finished processing
write_file(self.process_info_path, self.initial_process_info, True)
url = reverse('job_status', kwargs={'action': 'download'})
response = self.client.get(url, **self.header)
# Verify the status code and content
self.assertEqual(response.status_code, 202)
self.assertEqual(
response.data['message'], 'Download is being processed on the server')
# Verify the status of the process_info file is 'in_progress'
process_info = read_file(self.process_info_path, True)
self.assertEqual(process_info['resource_download']['status'], 'in_progress')
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_error_400_bad_action(self):
"""
Return a 400 if the 'action' query parameter is bad
"""
shared_call_get_resource_zip(self, self.resource_id)
header = {}
url = reverse('job_status', kwargs={'action': 'bad_action'})
response = self.client.get(url, **header)
# Verify the status code and content
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'],
"PresQT Error: 'bad_action' is not a valid acton.")
def test_error_400(self):
"""
Return a 400 if the 'presqt-source-token' is missing in the headers
"""
shared_call_get_resource_zip(self, self.resource_id)
header = {}
url = reverse('job_status', kwargs={'action': 'download'})
response = self.client.get(url, **header)
# Verify the status code and content
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'],
"PresQT Error: 'presqt-source-token' missing in the request headers.")
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_success_400_bad_format(self):
"""
Return a 400 if the given response_format is bad.
"""
shared_call_get_resource_zip(self, self.resource_id)
url = reverse('job_status', kwargs={'action': 'download',
'response_format': 'bad_format'})
response = self.client.get(url, **self.header)
# Verify the status code
self.assertEqual(response.status_code, 400)
# Verify the status code and data
self.assertEqual(response.data['error'],
'PresQT Error: bad_format is not a valid format for this endpoint.')
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_error_404(self):
"""
Return a 404 if the ticket_number provided is not a valid ticket number.
"""
header = {'HTTP_PRESQT_SOURCE_TOKEN': 'bad_token'}
url = reverse('job_status', kwargs={'action': 'download'})
response = self.client.get(url, **header)
# Verify the status code and content
self.assertEqual(response.status_code, 404)
self.assertEqual(response.data['error'], "PresQT Error: Invalid ticket number, '{}'.".format(
hash_tokens('bad_token')))
def test_error_500_401_token_invalid(self):
"""
Return a 500 if the Resource._download_resource() method running on the server gets a 401 error
"""
self.header = {'HTTP_PRESQT_SOURCE_TOKEN': '1234'}
self.token = '1234'
shared_call_get_resource_zip(self, self.resource_id)
url = reverse('job_status', kwargs={'action': 'download'})
response = self.client.get(url, **self.header)
self.assertEqual(response.status_code, 500)
self.assertEqual(response.data['message'],
"Token is invalid. Response returned a 401 status code.")
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_error_500_403_unauthorized_container_resource(self):
"""
Return a 500 if the Resource._download_resource() function running on the server gets a 403 error
"""
self.resource_id = 'q5xmw'
shared_call_get_resource_zip(self, self.resource_id)
url = reverse('job_status', kwargs={'action': 'download'})
response = self.client.get(url, **self.header)
self.assertEqual(response.status_code, 500)
self.assertEqual(response.data['message'],
"User does not have access to this resource with the token provided.")
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_error_500_403_unauthorized_item_resource(self):
"""
Return a 500 if the Resource._download_resource() function running on the server gets a 403 error
"""
self.resource_id = '5cd98c2cf244ec0020e4d9d1'
shared_call_get_resource_zip(self, self.resource_id)
url = reverse('job_status', kwargs={'action': 'download'})
response = self.client.get(url, **self.header)
self.assertEqual(response.status_code, 500)
self.assertEqual(response.data['message'],
"User does not have access to this resource with the token provided.")
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_error_500_404_resource_not_found(self):
"""
Return a 500 if the Resource._download_resource() function running on the server gets a 404 error
"""
self.resource_id = 'bad_id'
shared_call_get_resource_zip(self, self.resource_id)
url = reverse('job_status', kwargs={'action': 'download'})
response = self.client.get(url, **self.header)
self.assertEqual(response.status_code, 500)
self.assertEqual(response.data['message'],
"Resource with id 'bad_id' not found for this user.")
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_error_500_410_gone(self):
"""
Return a 500 if the Resource._download_resource() function running on the server gets a 410 error
"""
self.resource_id = '5cd989c5f8214b00188af9b5'
shared_call_get_resource_zip(self, self.resource_id)
url = reverse('job_status', kwargs={'action': 'download'})
response = self.client.get(url, **self.header)
self.assertEqual(response.status_code, 500)
self.assertEqual(response.data['message'],
"The requested resource is no longer available.")
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
class TestDownloadJobPATCH(SimpleTestCase):
"""
Test the `api_v1/job_status/download/` endpoint's PATCH method.
Testing only PresQT core code.
"""
def setUp(self):
self.client = APIClient()
self.header = {'HTTP_PRESQT_SOURCE_TOKEN': OSF_TEST_USER_TOKEN,
'HTTP_PRESQT_EMAIL_OPT_IN': ''}
self.resource_id = 'cmn5z'
self.target_name = 'osf'
self.ticket_number = hash_tokens(OSF_TEST_USER_TOKEN)
self.token = OSF_TEST_USER_TOKEN
def test_success_200(self):
"""
Return a 200 for successful cancelled download process.
"""
download_url = reverse('resource', kwargs={'target_name': self.target_name,
'resource_id': self.resource_id,
'resource_format': 'zip'})
self.client.get(download_url, **self.header)
ticket_path = 'mediafiles/jobs/{}'.format(self.ticket_number)
# Verify process_info file status is 'in_progress' initially
process_info = read_file('{}/process_info.json'.format(ticket_path), True)
self.assertEqual(process_info['resource_download']['status'], 'in_progress')
# Wait until the spawned off process has a function_process_id to cancel the download
while not process_info['resource_download']['function_process_id']:
try:
process_info = read_file('{}/process_info.json'.format(ticket_path), True)
except json.decoder.JSONDecodeError:
# Pass while the process_info file is being written to
pass
download_patch_url = reverse('job_status', kwargs={'action': 'download'})
download_patch_url_response = self.client.patch(download_patch_url, **self.header)
self.assertEquals(download_patch_url_response.status_code, 200)
process_info = read_file('{}/process_info.json'.format(ticket_path), True)
download_process_info = process_info['resource_download']
self.assertEquals(download_process_info['message'], 'Download was cancelled by the user')
self.assertEquals(download_process_info['status'], 'failed')
self.assertEquals(download_process_info['status_code'], '499')
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_success_406(self):
"""
Return a 406 for unsuccessful cancel because the download finished already.
"""
download_url = reverse('resource', kwargs={'target_name': self.target_name,
'resource_id': '5cd98510f244ec001fe5632f',
'resource_format': 'zip'})
self.client.get(download_url, **self.header)
ticket_path = 'mediafiles/jobs/{}'.format(self.ticket_number)
# Verify process_info file status is 'in_progress' initially
process_info = read_file('{}/process_info.json'.format(ticket_path), True)
self.assertEqual(process_info['resource_download']['status'], 'in_progress')
# Wait until the spawned off process finishes to attempt to cancel download
while process_info['resource_download']['status'] == 'in_progress':
try:
process_info = read_file('{}/process_info.json'.format(ticket_path), True)
except json.decoder.JSONDecodeError:
# Pass while the process_info file is being written to
pass
download_patch_url = reverse('job_status', kwargs={'action': 'download'})
download_patch_url_response = self.client.patch(download_patch_url, **self.header)
        self.assertEqual(download_patch_url_response.status_code, 406)
        self.assertEqual(download_patch_url_response.data['message'], 'Download successful.')
        process_info = read_file('{}/process_info.json'.format(ticket_path), True)
        self.assertEqual(process_info['resource_download']['message'], 'Download successful.')
        self.assertEqual(process_info['resource_download']['status'], 'finished')
        self.assertEqual(process_info['resource_download']['status_code'], '200')
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
def test_error_400(self):
"""
Return a 400 if the 'presqt-source-token' is missing in the headers
"""
header = {}
url = reverse('job_status', kwargs={'action': 'download'})
response = self.client.patch(url, **header)
# Verify the status code and content
self.assertEqual(response.status_code, 400)
self.assertEqual(response.data['error'],
"PresQT Error: 'presqt-source-token' missing in the request headers.")
def test_success_400_bad_format(self):
"""
Return a 400 if the given response_format is bad.
"""
shared_call_get_resource_zip(self, '5cd98510f244ec001fe5632f')
url = reverse('job_status', kwargs={'action': 'download',
'response_format': 'bad_format'})
response = self.client.patch(url, **self.header)
# Verify the status code
self.assertEqual(response.status_code, 400)
# Verify the status code and data
self.assertEqual(response.data['error'],
'PresQT Error: bad_format is not a valid format for this endpoint.')
# Delete corresponding folder
shutil.rmtree('mediafiles/jobs/{}'.format(self.ticket_number))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author Christopher Wingard
@brief Load the PCO2W data from the uncabled, Coastal Endurance Surface
    Moorings and process the data to generate QARTOD Gross Range and
Climatology test limits
"""
import dateutil.parser as parser
import os
import pandas as pd
import pytz
from ooi_data_explorations.common import get_annotations, load_gc_thredds, add_annotation_qc_flags
from ooi_data_explorations.combine_data import combine_datasets
from ooi_data_explorations.uncabled.process_pco2w import pco2w_instrument, quality_checks
from ooi_data_explorations.qartod.qc_processing import identify_blocks, create_annotations, process_gross_range, \
process_climatology, inputs
def generate_qartod(site, node, sensor, cut_off):
"""
Load all of the pCO2 data for a defined reference designator (using the
site, node and sensor names to construct the reference designator)
collected via the recovered instrument method and combine them into a
single data set from which QARTOD test limits for the gross range and
climatology tests can be calculated.
:param site: Site designator, extracted from the first part of the
reference designator
:param node: Node designator, extracted from the second part of the
reference designator
:param sensor: Sensor designator, extracted from the third and fourth part
of the reference designator
:param cut_off: string formatted date to use as cut-off for data to add
to QARTOD test sets
:return annotations: Initial list of auto-generated HITL annotations as
a pandas dataframe
:return gr_lookup: CSV formatted strings to save to a csv file for the
QARTOD gross range lookup tables.
:return clm_lookup: CSV formatted strings to save to a csv file for the
QARTOD climatology lookup tables.
:return clm_table: CSV formatted strings to save to a csv file for the
QARTOD climatology range tables.
"""
# load the recovered instrument data
data = load_gc_thredds(site, node, sensor, 'recovered_inst', 'pco2w_abc_instrument', '^(?!.*blank).*PCO2W.*nc$')
data = pco2w_instrument(data)
# resample the data into a 3 hour, median averaged time series
data = combine_datasets(data, None, None, 180)
# recalculate the quality flags as averaging will alter them
data['pco2_seawater_quality_flag'] = quality_checks(data)
# create a boolean array of the data marked as "fail" by the pCO2 quality checks and generate initial
# HITL annotations that can be combined with system annotations and pCO2 quality checks to create
# a cleaned up data set prior to calculating the QARTOD test values
fail = data.pco2_seawater_quality_flag.where(data.pco2_seawater_quality_flag == 4).notnull()
blocks = identify_blocks(fail, [24, 96])
hitl = create_annotations(site, node, sensor, blocks)
# get the current system annotations for the sensor
annotations = get_annotations(site, node, sensor)
annotations = pd.DataFrame(annotations)
if not annotations.empty:
annotations = annotations.drop(columns=['@class'])
annotations['beginDate'] = pd.to_datetime(annotations.beginDT, unit='ms').dt.strftime('%Y-%m-%dT%H:%M:%S')
annotations['endDate'] = pd.to_datetime(annotations.endDT, unit='ms').dt.strftime('%Y-%m-%dT%H:%M:%S')
# append the fail annotations to the existing annotations
annotations = annotations.append(pd.DataFrame(hitl), ignore_index=True, sort=False)
# create a roll-up annotation flag
data = add_annotation_qc_flags(data, annotations)
# clean-up the data, removing values that fail the pCO2 quality checks or were marked as fail in the annotations
data = data.where((data.pco2_seawater_quality_flag != 4) & (data.rollup_annotations_qc_results != 4))
# if a cut_off date was used, limit data to all data collected up to the cut_off date.
# otherwise, set the limit to the range of the downloaded data.
if cut_off:
cut = parser.parse(cut_off)
cut = cut.astimezone(pytz.utc)
end_date = cut.strftime('%Y-%m-%dT%H:%M:%S')
src_date = cut.strftime('%Y-%m-%d')
else:
cut = parser.parse(data.time_coverage_end)
cut = cut.astimezone(pytz.utc)
end_date = cut.strftime('%Y-%m-%dT%H:%M:%S')
src_date = cut.strftime('%Y-%m-%d')
data = data.sel(time=slice('2014-01-01T00:00:00', end_date))
# create the initial gross range entry
gr = process_gross_range(data, ['pco2_seawater'], [200, 2000], site=site, node=node, sensor=sensor)
# re-work gross entry for the different streams and parameter names
gr_lookup = pd.DataFrame()
gr_lookup = gr_lookup.append([gr, gr, gr], ignore_index=True)
gr_lookup['parameter'][0] = {'inp': 'pco2_seawater'}
gr_lookup['stream'][0] = 'pco2w_abc_dcl_instrument'
gr_lookup['parameter'][1] = {'inp': 'pco2_seawater'}
gr_lookup['stream'][1] = 'pco2w_abc_dcl_instrument_recovered'
gr_lookup['parameter'][2] = {'inp': 'pco2_seawater'}
gr_lookup['stream'][2] = 'pco2w_abc_instrument'
gr_lookup['source'] = ('Sensor min/max based on the vendor standard calibration range. '
'The user min/max is the historical mean of all data collected '
'up to {} +/- 3 standard deviations.'.format(src_date))
# create and format the climatology entry and table
cll, clm_table = process_climatology(data, ['pco2_seawater'], [200, 2000], site=site, node=node, sensor=sensor)
# re-work climatology entry for the different streams and parameter names
clm_lookup = pd.DataFrame()
clm_lookup = clm_lookup.append([cll, cll, cll])
clm_lookup['parameters'][0] = {'inp': 'pco2_seawater', 'tinp': 'time', 'zinp': 'None'}
clm_lookup['stream'][0] = 'pco2w_abc_dcl_instrument'
clm_lookup['parameters'][1] = {'inp': 'pco2_seawater', 'tinp': 'time', 'zinp': 'None'}
clm_lookup['stream'][1] = 'pco2w_abc_dcl_instrument_recovered'
clm_lookup['parameters'][2] = {'inp': 'pco2_seawater', 'tinp': 'time', 'zinp': 'None'}
clm_lookup['stream'][2] = 'pco2w_abc_instrument'
return annotations, gr_lookup, clm_lookup, clm_table
def main(argv=None):
"""
Download the PCO2W data from the Gold Copy THREDDS server and create the
QARTOD gross range and climatology test lookup tables.
"""
# setup the input arguments
args = inputs(argv)
site = args.site
node = args.node
sensor = args.sensor
cut_off = args.cut_off
# create the initial HITL annotation blocks, the QARTOD gross range and climatology lookup values, and
# the climatology table for the pco2_seawater parameter
annotations, gr_lookup, clm_lookup, clm_table = generate_qartod(site, node, sensor, cut_off)
# save the resulting annotations and qartod lookups and tables
out_path = os.path.join(os.path.expanduser('~'), 'ooidata/qartod/pco2w')
out_path = os.path.abspath(out_path)
if not os.path.exists(out_path):
os.makedirs(out_path)
# save the annotations to a csv file for further processing
csv_names = ['id', 'subsite', 'node', 'sensor', 'method', 'stream', 'parameters',
'beginDate', 'endDate', 'exclusionFlag', 'qcFlag', 'source', 'annotation']
anno_csv = '-'.join([site, node, sensor]) + '.quality_annotations.csv'
annotations.to_csv(os.path.join(out_path, anno_csv), index=False, columns=csv_names)
# save the gross range values to a csv for further processing
csv_names = ['subsite', 'node', 'sensor', 'stream', 'parameter', 'qcConfig', 'source']
gr_csv = '-'.join([site, node, sensor]) + '.gross_range.csv'
gr_lookup.to_csv(os.path.join(out_path, gr_csv), index=False, columns=csv_names)
# save the climatology values and table to a csv for further processing
csv_names = ['subsite', 'node', 'sensor', 'stream', 'parameters', 'climatologyTable', 'source']
clm_csv = '-'.join([site, node, sensor]) + '.climatology.csv'
clm_tbl = '-'.join([site, node, sensor]) + '-pco2_seawater.csv'
clm_lookup.to_csv(os.path.join(out_path, clm_csv), index=False, columns=csv_names)
with open(os.path.join(out_path, clm_tbl), 'w') as clm:
clm.write(clm_table[0])
if __name__ == '__main__':
main()
|
# Copyright 2019-2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Base classes that a metrics consumer (e.g. Stackdriver, ffwd, etc.)
will need to implement.
New consumers are required to implement the :class:`AbstractRelayClient`, and
three metrics objects based off of :class:`BaseMetric`: a counter, a gauge, and
a timer.
"""
import abc
import six
class _DummyAttribute(object):
# for the ability to do `FOO_ATTR = abstract_attr()` as well as
# decorate a property method
pass
def abstract_attr(obj=None):
"""Set an attribute or a property as abstract.
Supports class-level attributes as well as methods defined as a
``@property``.
Usage:
.. code-block:: python
class Foo(object):
my_foo_attribute = abstract_attr()
@property
@abstract_attr
def my_foo_property(self):
pass
Args:
obj (callable): Python object to "decorate", i.e. a class method. If
none is provided, a dummy object is created in order to attach
the ``__isabstractattr__`` attribute (similar to
``__isabstractmethod__`` from ``abc.abstractmethod``).
Returns object with ``__isabstractattr__`` attribute set to ``True``.
"""
if not obj:
obj = _DummyAttribute()
obj.__isabstractattr__ = True
return obj
def _has_abstract_attributes_implemented(cls, name, bases):
"""Verify a given class has its abstract attributes implemented."""
for base in bases:
abstract_attrs = getattr(base, "_klio_metrics_abstract_attributes", [])
class_attrs = getattr(cls, "_klio_metrics_all_attributes", [])
for attr in abstract_attrs:
if attr not in class_attrs:
err_str = (
"Error instantiating class '{0}'. Implementation of "
"abstract attribute '{1}' from base class '{2}' is "
"required.".format(name, attr, base.__name__)
)
raise NotImplementedError(err_str)
def _get_all_attributes(clsdict):
return [name for name, val in six.iteritems(clsdict) if not callable(val)]
def _get_abstract_attributes(clsdict):
return [
name
for name, val in six.iteritems(clsdict)
if not callable(val) and getattr(val, "__isabstractattr__", False)
]
class _ABCBaseMeta(abc.ABCMeta):
"""Enforce behavior upon implementations of ABC classes."""
def __init__(cls, name, bases, clsdict):
_has_abstract_attributes_implemented(cls, name, bases)
def __new__(metaclass, name, bases, clsdict):
clsdict[
"_klio_metrics_abstract_attributes"
] = _get_abstract_attributes(clsdict)
clsdict["_klio_metrics_all_attributes"] = _get_all_attributes(clsdict)
cls = super(_ABCBaseMeta, metaclass).__new__(
metaclass, name, bases, clsdict
)
return cls
class AbstractRelayClient(six.with_metaclass(_ABCBaseMeta)):
"""Abstract base class for all metric consumer relay clients.
Each new consumer (i.e. Stackdriver, ffwd, logging-based metrics)
will need to implement this relay class.
Attributes:
RELAY_CLIENT_NAME (str): must match the key in ``klio-job.yaml``
under ``job_config.metrics``.
"""
RELAY_CLIENT_NAME = abstract_attr()
def __init__(self, klio_config):
self.klio_config = klio_config
@abc.abstractmethod
def unmarshal(self, metric):
"""Returns a dictionary-representation of the ``metric`` object"""
pass
@abc.abstractmethod
def emit(self, metric):
"""Emit the given metric object to the particular consumer.
``emit`` will be run in a threadpool separate from the transform,
and any errors raised from the method will be logged then ignored.
"""
pass
@abc.abstractmethod
def counter(self, name, value=0, transform=None, **kwargs):
"""Return a newly instantiated counter-type metric specific for
the particular consumer.
Callers to the ``counter`` method will store new counter objects
returned in memory for simple caching.
"""
pass
@abc.abstractmethod
def gauge(self, name, value=0, transform=None, **kwargs):
"""Return a newly instantiated gauge-type metric specific for
the particular consumer.
Callers to the ``gauge`` method will store new gauge objects
returned in memory for simple caching.
"""
pass
@abc.abstractmethod
def timer(self, name, transform=None, **kwargs):
"""Return a newly instantiated timer-type metric specific for
the particular consumer.
Callers to the ``timer`` method will store new timer objects
returned in memory for simple caching.
"""
pass
class BaseMetric(object):
"""Base class for all metric types.
A consumer must implement a counter metric, a gauge metric, and a
timer metric.
"""
def __init__(self, name, value=0, transform=None, **kwargs):
self.name = name
self.value = value
self.transform = transform
def update(self, value):
self.value = value
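# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the library): a minimal logging-based
# consumer built on the classes above. The names ``LoggingMetric`` and
# ``LoggingRelayClient`` are hypothetical; a real consumer would also read its
# settings from ``job_config.metrics`` in ``klio-job.yaml``.
# ---------------------------------------------------------------------------
import logging


class LoggingMetric(BaseMetric):
    """Counter/gauge/timer stand-in that simply stores the latest value."""


class LoggingRelayClient(AbstractRelayClient):
    # Required: _ABCBaseMeta raises NotImplementedError at class-creation time
    # if an abstract attribute of the base class is left unimplemented.
    RELAY_CLIENT_NAME = "logger"

    def unmarshal(self, metric):
        return {"name": metric.name, "value": metric.value}

    def emit(self, metric):
        logging.getLogger("klio.metrics").info(self.unmarshal(metric))

    def counter(self, name, value=0, transform=None, **kwargs):
        return LoggingMetric(name, value, transform, **kwargs)

    def gauge(self, name, value=0, transform=None, **kwargs):
        return LoggingMetric(name, value, transform, **kwargs)

    def timer(self, name, transform=None, **kwargs):
        return LoggingMetric(name, transform=transform, **kwargs)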
|
from .plugins import plugins
from itsdangerous import TimedJSONWebSignatureSerializer, BadSignature
from datetime import datetime
from flask import current_app
# plugins = create_plugins()
db, migrate, bcrypt, login_manager = plugins
class User(db.Model):
id = db.Column(db.Integer, primary_key=True, nullable=False)
email = db.Column(db.String(40), unique=True, nullable=True)
email_verified = db.Column(db.Boolean, nullable=False)
password = db.Column(db.String(64), nullable=True)
is_admin = db.Column(db.Boolean, nullable=False)
@classmethod
def create_password_hash(cls, new_password):
return bcrypt.generate_password_hash(new_password).decode("utf-8")
def __init__(self, **kwargs):
if kwargs["password"] is not None:
kwargs["password"] = User.create_password_hash(kwargs["password"])
if "email_verified" not in kwargs:
kwargs["email_verified"] = False
super().__init__(**kwargs)
def __repr__(self):
return str(self.email)
def update_password(self, new_password):
self.password = User.create_password_hash(new_password)
def update_email(self, new_email):
self.email = new_email
self.email_verified = False
def create_json_web_token(self):
serializer = TimedJSONWebSignatureSerializer(
current_app.config["SECRET_KEY"], 1800
)
return serializer.dumps({"user_id": self.id}).decode("utf-8")
@staticmethod
def verify_json_web_token(token):
serializer = TimedJSONWebSignatureSerializer(current_app.config["SECRET_KEY"])
        try:
            user_id = serializer.loads(token)["user_id"]
        except (BadSignature, KeyError):
            # BadSignature also covers expired tokens (SignatureExpired is a
            # subclass); KeyError covers a payload without a "user_id" claim.
            return None
return User.query.get(user_id)
def check_password_hash(self, alleged_password):
return bcrypt.check_password_hash(self.password, alleged_password)
@property
def is_anonymous(self):
        return not (self.email and self.password)
@property
def is_authenticated(self):
        return not self.is_anonymous
@property
def is_active(self):
return self.is_authenticated
@property
def is_admin_authenticated(self):
return self.email and self.password and self.is_admin
def get_id(self):
return str(self.id)
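# Token round-trip sketch (assumes a Flask application context with SECRET_KEY
# configured; ``user`` is a hypothetical persisted User row):
#
#   token = user.create_json_web_token()           # valid for 1800 seconds
#   same_user = User.verify_json_web_token(token)  # None if invalid or expired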
class NewEmail(db.Model):
"""
    Email table representing currently 'active' emails, with at most one row
    per user (user_id is unique).
    An email will be removed from this table after the recipient takes action,
    or after a few hours.
"""
id = db.Column(
db.Integer, primary_key=True, nullable=False
)
user_id = db.Column(
db.Integer, unique=True, nullable=False #db.ForeignKey("user.id")
)
typ = db.Column(db.Integer, nullable=False)
creation_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
class OldEmail(db.Model):
"""
Valid integers for Email Types:
0 = verification
1 = password reset
"""
id = db.Column(
db.Integer, primary_key=True, nullable=False
)
user_id = db.Column(
db.Integer, db.ForeignKey("user.id"), nullable=False
)
typ = db.Column(db.Integer, nullable=False)
creation_time = db.Column(db.DateTime, nullable=False)
archive_time = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
@classmethod
def from_email(cls, email: NewEmail):
return cls(user_id=email.user_id, typ=email.typ, creation_time=email.creation_time)
# Shop Module's Profile_Models
# ---------------------------------------------------
class ShippingAddress(db.Model):
"""Saved for registered user after placing an order.
If a user has no ShippingAddress then we use the staticmethod default()"""
@staticmethod
def default():
return {
"first_name": "",
"last_name": "",
"phone": "",
"address1": "",
"address2": "",
"postal_code": "",
"city": "",
"province": "",
}
@staticmethod
def names():
return {
"first_name": "First Name",
"last_name": "Last Name",
"phone": "Phone Number",
"address1": "Address 1",
"address2": "Address 2",
"postal_code": "Postal Code",
"city": "City",
"province": "Province",
}
user_id = db.Column(
db.Integer, db.ForeignKey("user.id"), primary_key=True, nullable=False
)
first_name = db.Column(db.String(40), nullable=False)
last_name = db.Column(db.String(40), nullable=False)
phone = db.Column(db.String(11), nullable=False)
address1 = db.Column(db.String(30), nullable=False)
address2 = db.Column(db.String(30), nullable=False)
postal_code = db.Column(db.String(6), nullable=False)
city = db.Column(db.String(30), nullable=False)
province = db.Column(db.String(2), nullable=False)
|
"""phish_manager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.urls import path, include
from rest_framework.urlpatterns import format_suffix_patterns
from phish_manager.phisherman import views
# Wire up our API using automatic URL routing.
# Additionally, we include login URLs for the browsable API.
urlpatterns = [
path('incidents/', views.incident_list),
path('incidents/<int:id>/', views.incident_details),
path('email/', views.post_email)
]
urlpatterns = format_suffix_patterns(urlpatterns)
|
from django.db import models
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
#from django.db.models import Max
from oscar.apps.payment.abstract_models import AbstractSource
from web_payments.django.models import BasePayment
from web_payments import NotSupported
from decimal import Decimal
try:
from django.urls import reverse
except ImportError:
from django.core.urlresolvers import reverse
CENTI0 = Decimal("0.00")
class Source(AbstractSource, BasePayment):
variant = None
temp_shipping = None
temp_billing = None
temp_extra = None
temp_email = None
temp_form = None
order = models.ForeignKey(
'order.Order',
on_delete=models.CASCADE,
related_name='sources',
verbose_name=_("Order"), null=True)
amount_refunded = models.DecimalField(
_("Amount Refunded"), decimal_places=2, max_digits=12,
default=CENTI0)
# for retrieving failed transactions (not enabled by default)
order_number = models.CharField(
_("Order number"), max_length=128, null=True, blank=True)
currency = models.CharField(max_length=10)
def get_success_url(self):
return "{}://{}{}".format(getattr(settings, "PAYMENT_PROTOCOL", "https"), Site.objects.get_current().domain, reverse('checkout:payment-details'))
get_failure_url = get_success_url
def get_shipping_address(self):
if self.temp_shipping:
return {
"first_name": self.temp_shipping.first_name,
"last_name": self.temp_shipping.last_name,
"address_1": self.temp_shipping.line1,
"address_2": self.temp_shipping.line2,
"city": self.temp_shipping.line4,
"postcode": self.temp_shipping.postcode,
"country_code": self.temp_shipping.country.iso_3166_1_a2,
"country_area": self.temp_shipping.state,
"phone_number": self.temp_shipping.phone_number,
"email": self.temp_email
}
else:
return {
"first_name": self.order.shipping_address.first_name,
"last_name": self.order.shipping_address.last_name,
"address_1": self.order.shipping_address.line1,
"address_2": self.order.shipping_address.line2,
"city": self.order.shipping_address.line4,
"postcode": self.order.shipping_address.postcode,
"country_code": self.order.shipping_address.country.iso_3166_1_a2,
"country_area": self.order.shipping_address.state,
"phone_number": self.order.shipping_address.phone_number,
"email": self.order.guest_email
}
def get_billing_address(self):
if self.temp_billing:
return {
"first_name": self.temp_billing.first_name,
"last_name": self.temp_billing.last_name,
"address_1": self.temp_billing.line1,
"address_2": self.temp_billing.line2,
"city": self.temp_billing.line4,
"postcode": self.temp_billing.postcode,
"country_code": self.temp_billing.country.iso_3166_1_a2,
"country_area": self.temp_billing.state,
"phone_number": self.temp_billing.phone_number,
"email": self.temp_email
}
else:
return {
"first_name": self.order.billing_address.first_name,
"last_name": self.order.billing_address.last_name,
"address_1": self.order.billing_address.line1,
"address_2": self.order.billing_address.line2,
"city": self.order.billing_address.line4,
"postcode": self.order.billing_address.postcode,
"country_code": self.order.billing_address.country.iso_3166_1_a2,
"country_area": self.order.billing_address.state,
"phone_number": self.order.billing_address.phone_number,
"email": self.order.guest_email
}
def allocate(self, amount, reference='', status=''):
"""
Convenience method for ring-fencing money against this source
"""
raise NotSupported()
def debit(self, amount=None, reference='', status=''):
"""
Convenience method for recording debits against this source
"""
if amount is None:
amount = self.balance
self.amount_debited += amount
self.save()
self._create_transaction(
AbstractTransaction.DEBIT, amount, reference, status)
debit.alters_data = True
def refund(self, amount, reference='', status=''):
"""
        Convenience method for recording refunds against this source.
        If ``amount`` is None, the full amount is refunded.
        """
        amount = BasePayment.refund(self, amount)
self.amount_refunded += amount
self._create_transaction(
AbstractTransaction.REFUND, amount, reference, status)
refund.alters_data = True
def get_payment_extra(self):
if self.temp_extra:
return self.temp_extra
else:
return {
"tax": self.order.total_incl_tax-self.order.total_excl_tax if self.order else CENTI0,
"delivery": self.order.shipping_incl_tax if self.order else CENTI0
}
@property
def reference(self):
return self.source_type.code
@property
def variant(self):
return self.source_type.code
@property
def label(self):
extra = self.get_provider(self.source_type.code).extra
return "-".join([extra.get("verbose_name", extra["name"]), self.id])
def save(self, *args, **kwargs):
self.create_token()
return AbstractSource.save(self, *args, **kwargs)
@property
def amount_allocated(self):
return self.total.quantize(CENTI0)
@property
def amount_debited(self):
return self.captured_amount.quantize(CENTI0)
@property
def balance(self):
"""
Return the balance of this source
"""
return self.captured_amount
@property
def amount_available_for_refund(self):
"""
Return the amount available to be refunded
"""
return self.captured_amount
from oscar.apps.payment.models import * # noqa isort:skip
|
import unittest
from application.run import app
class ApiTest(unittest.TestCase):
def test_fake_request(self):
client = app.test_client(self)
response = client.get('/api/v1/demo')
assert response.status_code == 200, "Should return status code 200"
if __name__ == '__main__':
unittest.main()
|
Pasha loves riding public transport, and whenever he gets a ticket he
immediately checks whether it is a lucky one. A ticket is considered lucky
if the sum of its first three digits equals the sum of its last three digits.
Pasha, however, is very bad at mental arithmetic, so he asked you to write a
program that compares the two sums and prints "Счастливый" ("lucky") if they
are equal and "Обычный" ("ordinary") if they differ.
The input is a single string of six digits.
The output must be only the word "Счастливый" or "Обычный", capitalized.
# put your python code here
s = str(input())
sum1=int(s[0])+int(s[1])+int(s[2])
sum2=int(s[3])+int(s[4])+int(s[5])
if sum1==sum2:
print('Счастливый')
else:
print('Обычный')
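# A more compact equivalent using slices (a sketch; same behaviour as above):
#   s = input()
#   print('Счастливый' if sum(map(int, s[:3])) == sum(map(int, s[3:])) else 'Обычный')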
|
import fire
import imageio
letter2morse = {"A": ".-",
"B": "-...",
"C": "-.-.",
"D": "-..",
"E": ".",
"F": "..-.",
"G": "--.",
"H": "....",
"I": "..",
"J": ".---",
"K": "-.-",
"L": ".-..",
"M": "--",
"N": "-.",
"O": "---",
"P": ".--.",
"Q": "--.-",
"R": ".-.",
"S": "...",
"T": "-",
"U": "..-",
"V": "...-",
"W": ".--",
"X": "-..-",
"Y": "-.--",
"Z": "--..",
"1": ".----",
"2": "..---",
"3": "...--",
"4": "....-",
"5": ".....",
"6": "-....",
"7": "--...",
"8": "---..",
"9": "----.",
"0": "-----",
".": ".-.-.-",
",": "--..--",
"?": "..--.."}
class MorseGif(object):
'''
Sometimes you want to make a gif that's blinking out a message in morse
'''
def __init__(self, opening_frames=5, duration=0.2, char_frames=2,
dit_frames=1, dah_frames=3, space_frames=2):
'''
Defaults for the gif
Args:
opening_frames (int): how many frames of "off" before the code
(default: 5)
            duration (float): time for each frame to be displayed
(default: 0.2)
char_frames (int): frames of 'off' between characters (default: 2)
dit_frames (int): frames of 'on' for dit (.) (default: 1)
dah_frames (int): frames of 'on' for dah (-) (default: 3)
space_frames (int): frames of 'off' for spaces between words
(default: 2)
'''
self.opening_frames = opening_frames
self.duration = duration
self.char_frames = char_frames
self.dit_frames = dit_frames
self.dah_frames = dah_frames
self.space_frames = space_frames
def make_gif(self, text, offimg, onimg, outgif,):
'''
        This is the main method of the ``MorseGif`` object. Given two stills
        and a message, it will make a gif containing the morse code.
Args:
text (str): content of the message to be converted to morse code
offimg (str): path to the still for the "off" portion of the code
(no sound/signal)
onimg (str): path to the still for the "on" portion of the code
(sound/signal)
outgif (str): path to where the gif output should be written
'''
self.on = imageio.imread(onimg)
self.off = imageio.imread(offimg)
self.images = [self.off for x in range(self.opening_frames)]
self.outgif = outgif
self._make_letter2ditdah()
self._text_to_gif(text)
def _make_letter2ditdah(self):
'''
Construct a letter to gif morse dictionary
'''
c2d = {".": [self.on for x in range(self.dit_frames)],
"-": [self.on for x in range(self.dah_frames)]}
self.letter2ditdah = {}
for l, morse in letter2morse.items():
tmp = []
for c in letter2morse[l]:
for d in c2d[c]:
tmp.append(d)
tmp.append(self.off)
self.letter2ditdah[l] = tmp
self.letter2ditdah[' '] = [self.off for x in range(self.space_frames)]
self.letter2ditdah['nextchar'] = [self.off for x in range(self.char_frames)] # noqa
def _text_to_gif(self, text):
'''
Actually construct the gif
'''
text = text.upper()
for l in text:
self.images += self.letter2ditdah[l]
self.images += self.letter2ditdah['nextchar']
imageio.mimsave(self.outgif, self.images, duration=self.duration)
if __name__ == "__main__":
fire.Fire(MorseGif)
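# Example invocation through python-fire (a sketch; the script name and the
# image/gif file names are hypothetical):
#
#   python morse_gif.py make_gif --text "sos" --offimg off.png \
#       --onimg on.png --outgif sos.gif
#
# or directly from Python:
#
#   MorseGif(duration=0.1).make_gif("sos", "off.png", "on.png", "sos.gif")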
|
from django.db import models
# Create your models here.
class AdminPage(models.Model):
name = models.CharField(max_length=20)
age = models.PositiveSmallIntegerField()
address = models.CharField(max_length=30)
def __str__(self):
return f'{self.name} {self.age} {self.address}'
|
"""
============================================
Generate ROC Curves for all the classifiers.
============================================
We consider all the 31 features for classification.
For more details about IIITBh-keystroke dataset goto: https://github.com/aroonav/IIITBh-keystroke
For more details about CMU's dataset goto: http://www.cs.cmu.edu/~keystroke/
These features are used to train and test NN & SVM models for
classifying users on the basis of the timings between their keystrokes.
This will generate the ROC Curves.
"""
# print(__doc__)
import os
import numpy as np
import csv
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from sklearn import svm
# Fast order-preserving elimination of duplicates
def removeDupes(seq):
seen = set()
return [ x for x in seq if x not in seen and not seen.add(x)]
def convert_numerical_train(labels):
x = []
for i in range(noOfTotalClasses):
for j in range(noOfTrainingVectors):
# x.append(int(labels[(i*noOfTotalVectors) + j][-2:]))
x.append(i)
return x
def convert_numerical_test(labels):
x = []
for i in range(noOfTotalClasses):
for j in range(noOfTestingVectors):
# x.append(int(labels[(i*noOfTotalVectors) + j][-2:]))
x.append(i)
return x
def load_trainingData():
"""
This reads file DSL-StrongPasswordData.csv and returns the training data in
an ndarray of shape noOfTrainingVectors*noOfFeatures and target ndarray
of shape (noOfTrainingVectors*noOfTotalClasses)*1.
"""
global datasetPath
dataset = np.empty([0,noOfFeatures])
target = np.empty(0)
file = open(datasetPath)
reader = csv.reader(file)
	next(reader)
for i in range(noOfTotalClasses):
# for i in range(noOfTotalClasses+1):
# if i == 0:
# for j in xrange(noOfTotalVectors):
# reader.next()
# continue
for j in range(noOfTrainingVectors):
			tempData = next(reader)  # Read one vector
currentSubject = tempData[0] # Save subject's name
for k in range(3): # Discard first 3 values
del tempData[0]
			tempData = list(map(float, tempData))
tempData = np.array(tempData, ndmin=2)
dataset = np.append(dataset, tempData, axis=0)
target = np.append(target, [currentSubject], axis=0)
for j in range(noOfTestingVectors): # Discard testing vectors for now
			tempData = next(reader)  # Discard one vector
# Discard the rest of the unused vectors now
for j in range(noOfTotalVectors - noOfTrainingVectors - noOfTestingVectors):
			tempData = next(reader)  # Discard one vector
return dataset,target
def load_testingData():
"""
TODO: Merge load_testingData() and load_trainingData() functions
This reads file DSL-StrongPasswordData.csv and returns the testing data in
an ndarray of shape noOfTestingVectors*noOfFeatures and target ndarray
of shape (noOfTestingVectors*noOfTotalClasses)*1.
"""
global datasetPath
dataset = np.empty([0,noOfFeatures])
target = np.empty(0)
file = open(datasetPath)
reader = csv.reader(file)
	next(reader)
for i in range(noOfTotalClasses):
# for i in range(noOfTotalClasses+1):
# if i == 0:
# for j in xrange(noOfTotalVectors):
# reader.next()
# continue
for j in range(noOfTrainingVectors): # Discard training vectors now
			tempData = next(reader)  # Discard one vector
for j in range(noOfTestingVectors):
			tempData = next(reader)  # Read one vector
currentSubject = tempData[0] # Save subject's name
for k in range(3): # Discard first 3 values
del tempData[0]
			tempData = list(map(float, tempData))
tempData = np.array(tempData, ndmin=2)
dataset = np.append(dataset, tempData, axis=0)
target = np.append(target, [currentSubject], axis=0)
# Discard the rest of the unused vectors now
for j in range(noOfTotalVectors - noOfTrainingVectors - noOfTestingVectors):
			tempData = next(reader)  # Discard one vector
return dataset,target
# Total number of classes.
noOfTotalClasses = 5
# Total number of vectors available for one class.
noOfTotalVectors = 250
# For training purposes for one class use first `noOfTrainingVectors` vectors.
noOfTrainingVectors = 96
# For testing purposes for one class use the next `noOfTestingVectors` vectors.
# noOfTestingVectors = noOfTotalVectors - noOfTrainingVectors
noOfTestingVectors = 154
# Each vector contains `noOfFeatures` features.
noOfFeatures = 31
# This contains the path for the dataset.
datasetPath = os.path.normpath(os.getcwd() + os.sep + os.pardir)
# datasetPath = datasetPath + os.sep + "DSL-StrongPasswordData.csv"
datasetPath = datasetPath + os.sep + "IIITBh-Small.csv"
noOfInputNodes = noOfFeatures
# The number of Hidden nodes is taken as (2*P)/3, where P is the number of the input nodes
noOfHiddenNodes = 15
# The number of output nodes is equal to the number of classes
noOfOutputNodes = noOfTotalClasses
# X: We take all the features. Or we can take only some features here by slicing.
# y: This contains the actual classes for each training vector i.e the target.
X,y = load_trainingData()
test_X,test_y = load_testingData()
classes = removeDupes(y)
y = convert_numerical_train(y)
test_y = convert_numerical_test(test_y)
y = np.array(y)
test_y = np.array(test_y)
# binarize output labels
y_binarized = label_binarize(y, classes=range(noOfTotalClasses))
test_y_binarized = label_binarize(test_y, classes=range(noOfTotalClasses))
# Neural Classifiers
sgd_clf = MLPClassifier(hidden_layer_sizes = (noOfInputNodes, noOfHiddenNodes, noOfOutputNodes),
activation = "tanh", solver = "sgd", max_iter = 1800, learning_rate = "adaptive", learning_rate_init="0.01",
random_state=0)
adam_clf = MLPClassifier(hidden_layer_sizes = (noOfInputNodes, noOfHiddenNodes, noOfOutputNodes),
activation = "tanh", solver = "adam", max_iter = 1000, random_state=0)
# SVM Classifiers
rbf_svc_clf = OneVsRestClassifier((svm.SVC(kernel='rbf', gamma=0.05, C=401, probability=True)))
lin_svc_clf = OneVsRestClassifier((svm.SVC(kernel='linear', C=801, gamma=0.01, probability=True)))
sgd = sgd_clf.fit(X,y_binarized)
adam = adam_clf.fit(X,y_binarized)
lin_svc = lin_svc_clf.fit(X, y_binarized)
rbf_svc = rbf_svc_clf.fit(X, y_binarized)
labels = ['Neural(SGD)', 'Neural(adam)', 'SVC(linear)', 'SVC(rbf)']
colors = ['black', 'blue', 'darkorange', 'violet', 'yellow', 'red', 'pink', 'green', 'magenta', 'cyan', 'grey', 'brown']
for i, clf in enumerate((sgd, adam, lin_svc, rbf_svc)):
y_score = clf.predict_proba(test_X)
fpr = dict()
tpr = dict()
roc_auc = dict()
for j in range(noOfTotalClasses):
fpr[j], tpr[j], thresholds = roc_curve(test_y_binarized[:, j], y_score[:, j])
roc_auc[j] = auc(fpr[j], tpr[j])
plt.figure()
lw = 2
for j,color in enumerate(colors[:noOfTotalClasses]):
plt.plot(fpr[j], tpr[j], color=color,
lw=lw, label=classes[j]+' (area = %0.2f)' % roc_auc[j])
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic for '+labels[i])
plt.legend(loc="lower right")
plt.show()
|
# Generated by Django 3.1.3 on 2020-11-26 16:19
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0014_auto_20201126_1943'),
]
operations = [
migrations.AddField(
model_name='reservation',
name='reference_name',
field=models.CharField(default='', max_length=40),
),
migrations.AlterField(
model_name='reservation',
name='timestamp',
field=models.DateTimeField(default=datetime.datetime(2020, 11, 26, 21, 49, 26, 199771)),
),
]
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import os
setup(
name='project',
version='1.0',
description="This is a test project",
author="Ashley Camba",
author_email='ashwoods@gmail.com',
url='https://github.com/ashwoods/pywhale',
packages=find_packages(),
package_data={'project': ['index.html']},
scripts=['manage.py'],
)
|
import json
import numpy as np
import matplotlib.pyplot as plt
import os
from scipy.stats.stats import pearsonr
from scipy.stats.stats import spearmanr
from sklearn.model_selection import train_test_split
from sklearn import ensemble
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LinearRegression
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import cohen_kappa_score
from prettytable import PrettyTable
def getBuildResults(database):
with open(database) as json_data:
t = json.load(json_data)
statuses = t['statuses']
clearedStats = []
firstIt = True
for k, v in sorted(statuses.items(), key=lambda statuses: int(statuses[0])):
if firstIt:
firstIt = False
continue
clearedStats.append(int(v=='passed'))
return clearedStats
def getCompilableStats(databaseFile, compFile):
with open(databaseFile) as json_data:
d = json.load(json_data)
commits = d['commits']
with open(compFile) as json_data:
c = json.load(json_data)
compList = []
firstIt = True
for k, v in sorted(commits.items(), key=lambda commits: int(commits[0])):
if firstIt:
firstIt = False
continue
y = next((x for x in c if x['commitID'] == v), None)
compList.append(int(y['compilable']))
return compList
def getVersionDiff(versionDiffFile):
with open(versionDiffFile) as json_data:
d = json.load(json_data)
a2a = []
relInst = []
absInst = []
cvgFrom = []
cvgTarget = []
numNodes = []
numEdges = []
deg = []
for k in sorted(d, key=lambda d: d['toVersion']):
relInst.append(float(k['metrics']['global']['avgRelInst']))
absInst.append(float(k['metrics']['global']['avgAbsInst']))
a2a.append(float(k['metrics']['arcade']['a2a']))
cvgFrom.append(float(k['metrics']['arcade']['cvgSource']))
cvgTarget.append(float(k['metrics']['arcade']['cvgTarget']))
numNodes.append(float(k['metrics']['global']['numNodes']))
numEdges.append(float(k['metrics']['global']['numEdges']))
deg.append(float(k['metrics']['global']['avgNodeDeg']))
a = np.zeros((len(relInst), 8))
for i in range(len(relInst)):
a[i][0] = numNodes[i]
a[i][1] = numEdges[i]
a[i][2] = absInst[i]
a[i][3] = relInst[i]
a[i][4] = deg[i]
a[i][5] = a2a[i]
a[i][6] = cvgFrom[i]
a[i][7] = cvgTarget[i]
return a
def booleanize(y):
# Booleanize:
z = np.zeros(y.shape)
z[y != 0] = 1
return z
def getPrevNext(y, threshold):
z = booleanize(y)
# Biased because of Cross Projects
afterStats = []
for k in range(len(z)):
sum = 0
for i in range(threshold):
if (k+i) < len(z)-1:
sum += y[k+i]
afterStats.append(sum)
beforeStats = []
for k in range(len(z)):
sum = 0
for i in range(threshold):
if (k-i) > 0:
sum += z[k-i]
beforeStats.append(sum)
return (beforeStats, afterStats)
def getStatistics(A, y):
prNx_threshold = [2, 3, 5, 10]
change_threshold = [0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.1, 0.2, 0.5]
for feature in range(A.shape[1]):
print('\n')
print('#'*150)
print(featureList[feature])
samples = A[:, feature]
print('M vs Out ' + str(pearsonr(samples, y)))
for ch_th in change_threshold:
B = (A[:,feature]>ch_th).astype(int)
print('Changes over Threshold ' + str(ch_th) + ': ' + str((B == 1).sum()))
if ((B==1).sum()) > 0:
print('Ch (' + str(ch_th) + ') vs Out : ' + str(spearmanr(B, y)))
failsIfChange = 0
for i in range(len(B)):
if B[i] == 1 and y[i] != 0:
failsIfChange += 1
print('P(fail | change): ' + str(failsIfChange) + '/' + str((B==1).sum()) + ' = ' + str(failsIfChange / (B==1).sum()))
print('P(change | fail): ' + str(failsIfChange) + '/' + str((y!=0).sum()) + ' = ' + str(failsIfChange / (y!=0).sum()))
for pr_th in prNx_threshold:
(before, after) = getPrevNext(y, pr_th)
print('M vs Bef (' + str(pr_th) + '): ' + str(pearsonr(samples[pr_th:], before[pr_th:])))
print('M vs Nxt (' + str(pr_th) + '): ' + str(pearsonr(samples[pr_th:], after[pr_th:])))
for ch_th in change_threshold:
B = (A[:,feature]>ch_th).astype(int)
if ((B==1).sum()) > 0:
print('Ch (' + str(ch_th) + ') vs Bef (' + str(pr_th) + '): ' + str(spearmanr(B[pr_th:], before[pr_th:])))
print('Ch (' + str(ch_th) + ') vs Nxt (' + str(pr_th) + '): ' + str(spearmanr(B[pr_th:], after[pr_th:])))
print('#'*150)
def plotSpecific(A, y):
change_threshold = np.arange(100) * 0.01
samples = A[:, 5]
(before, after) = getPrevNext(y, 10)
(beforeE, afterE) = getPrevNext(y, 80)
corr = []
corrE = []
for ch_th in change_threshold:
B = (samples>ch_th).astype(int)
(co, p) = spearmanr(B[10:], after[10:])
corr.append(co)
(coE, pE) = spearmanr(B[80:], afterE[80:])
corrE.append(coE)
l1, = plt.plot(change_threshold, corr, 'b')
l2, = plt.plot(change_threshold, corrE, 'r')
plt.legend([l1, l2], ['Next 10 Builds', 'Next 80 Builds'], loc=1)
plt.xlim([0, 1])
plt.ylabel('Correlation')
plt.xlabel('Change Threshold')
plt.title('Spearman: ' + featureList[5] + ' vs Builds')
plt.show()
next_threshold = np.arange(1, 250)
B = (samples>0.0).astype(int)
corr = []
for nx_th in next_threshold:
(before, after) = getPrevNext(y, nx_th)
(co, p) = spearmanr(B[nx_th:], before[nx_th:])
corr.append(co)
plt.plot(next_threshold, corr)
plt.xlim([1, 250])
plt.ylabel('Correlation')
plt.xlabel('Previous Builds')
plt.title('Spearman: ' + featureList[5] + ' at (0.0) vs Previous n Builds')
plt.show()
def machineLearn(A, y):
X_train, X_test, y_train, y_test = train_test_split(A, y, test_size=0.33, stratify=y)
clf = ensemble.RandomForestClassifier()
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
tn, fp, fn, tp = confusion_matrix(y_test, pred).ravel()
print ( accuracy_score(y_test, pred))
print (cohen_kappa_score(y_test, pred))
print('TN: ' + str(tn))
print('TP: ' + str(tp))
print('FP: ' + str(fp))
print('FN: ' + str(fn))
def plot(A):
binwidth=0.01
#maximum = [350, 400, 300, 200, 300, 700, 1500 , 1500]
for feature in range(A.shape[1]):
values = A[:, feature]
values = list(filter(lambda a: a != 0, values))
#print(featureList[feature])
#print('Min: ' + str(min(A[:, feature])))
#print('Max: ' + str(max(A[:, feature])))
plt.hist(values, bins=np.arange(min(values), max(values) + binwidth, binwidth))
plt.xlim([0,1])
#plt.ylim([0, maximum[feature]])
plt.xlabel('Change percentage')
plt.ylabel('# Builds')
plt.title(featureList[feature])
#plt.show()
plt.savefig(featureList[feature] + '.pdf')
plt.close()
def metricCorr(A):
t = PrettyTable()
t.field_names = [''] + featureList
for i in range(8):
row = [featureList[i]] + ['', '', '', '', '', '', '', '']
for j in range(8):
(v, p) = pearsonr(A[:,i], A[:,j])
row[j+1] = format(v, '.2g') + ', ' + format(p, '.2g')
t.add_row(row)
print(t)
def getOutlier(A):
degree = A[:, 7]
print(degree.shape)
print(-np.partition(-degree, 3)[:3])
def checkCompVsBuildRes(results, comp):
results = 1-results
booleanComp = np.copy(comp)
booleanComp[comp > 1] = 1
res = []
for i, val in enumerate(results):
if val == 0:
res.append(booleanComp[i] == 0)
else:
res.append(True)
return np.array(res)
def countCompileAndDependencyErrors(comp):
return (comp == 2).sum() + (comp == 1).sum()
def countErrors(comp):
return np.count_nonzero(comp != 0)
def is_non_zero_file(fpath):
# Five bytes to counter empty json arrays
return os.path.isfile(fpath) and os.path.getsize(fpath) > 5
def mergeLogsAndBuildRes(logs, buildRes):
    # buildRes: 1 = build passed; logs: error code extracted from the build logs
    merged = np.zeros(buildRes.shape)
    for ind, val in enumerate(buildRes):
        if val == 1:
            merged[ind] = 0
        else:
            if logs[ind] != 0:
                merged[ind] = logs[ind]
            else:
                merged[ind] = 4
    return merged
featureList = ['NumNodes', 'NumEdges', 'AbsInst', 'RelInst',
'NodeDegree', 'a2a', 'cvgSource', 'cvgTarget']
#0 = NO comp error
#1 = build passed
baseFol = 'combined/'
buildResults = []
comp = []
for project in os.listdir(baseFol):
databaseFile = baseFol + project + '/database.json'
compilableFile = baseFol + project + '/compilable.json'
buildResults.append(getBuildResults(databaseFile))
print(len(getBuildResults(databaseFile)))
comp.append(getCompilableStats(databaseFile, compilableFile))
buildResults = np.array([z for x in buildResults for z in x])
comp = np.array([z for x in comp for z in x])
print('All analyzed builds: ' + str(len(comp)))
print('Num of errors: ' + str(countErrors(comp)))
print('Num of compilation or dependency errors: ' + str(countCompileAndDependencyErrors(comp)))
print('No Error: ' + str((comp == 0).sum()))
print('Dependency Error: ' + str((comp == 1).sum()))
print('Compilation Error: ' + str((comp == 2).sum()))
print('Test Error: ' + str((comp == 3).sum()))
print('Other Error: ' + str((comp == 4).sum()))
#print((checkCompVsBuildRes(buildResults, comp) == False).sum())
print('Builds Failed: ' + str((buildResults == 0).sum()))
print('\n')
print('Successful analyzed projects')
print('\n')
A = []
y = []
c = []
numProjects = 0
for project in os.listdir(baseFol):
versionDiffFile = baseFol + project + '/versionDiff.json'
databaseFile = baseFol + project + '/database.json'
compilableFile = baseFol + project + '/compilable.json'
if is_non_zero_file(versionDiffFile):
numProjects += 1
A.append(getVersionDiff(versionDiffFile))
y.append(getBuildResults(databaseFile))
c.append(getCompilableStats(databaseFile, compilableFile))
A = np.array([z for x in A for z in x])
y = np.array([z for x in y for z in x])
c = np.array([z for x in c for z in x])
#y: 1 = build passed
#c, merged
#public static int NO_ERROR = 0;
#public static int DEPENDENCY_ERROR = 1;
#public static int COMPILATION_ERROR = 2;
#public static int TEST_ERROR = 3;
#public static int UNKNOWN_ERROR = 4;
merged = mergeLogsAndBuildRes(c, y)
print('Number of Projects: ' + str(numProjects))
#c[c == 2] = 1
#c[c == 3] = 0
#print(c)
print('All analyzed builds: ' + str(len(merged)))
print('Num of errors: ' + str(countErrors(merged)))
print('Num of compilation or dependency errors: ' + str(countCompileAndDependencyErrors(merged)))
print('No Error: ' + str((merged == 0).sum()))
print('Dependency Error: ' + str((merged == 1).sum()))
print('Compilation Error: ' + str((merged == 2).sum()))
print('Test Error: ' + str((merged == 3).sum()))
print('Other Error: ' + str((merged == 4).sum()))
passed = 0
for i in range(len(merged)):
if merged[i] == 0:
passed += 1
print('Pass Rate: ' + str(passed) + ' / ' + str(len(c)))
print('Passes: ' + str(passed / len(c)))
print('Change Rate per Metric: ' + str(np.count_nonzero(A, axis=0) / passed))
metricCorr(A)
getStatistics(A, merged)
machineLearn(A, booleanize(merged))
plot(A)
plotSpecific(A, merged)
|
import logging
import pathlib
import pkg_resources
import re
from mopidy import config, ext
from tornado.web import StaticFileHandler
from .file_server import FileServer
__version__ = pkg_resources.get_distribution("Mopidy-Mowecl").version
logger = logging.getLogger(__name__)
class ConfigColor(config.String):
def deserialize(self, value):
value = super().deserialize(value)
if not re.fullmatch("#[0-9A-Fa-f]{6}", value):
raise ValueError(f"Colors must be in the #AAAAAA format; {value} is not")
return value
class Extension(ext.Extension):
dist_name = "Mopidy-Mowecl"
ext_name = "mowecl"
version = __version__
def get_default_config(self):
return config.read(pathlib.Path(__file__).parent / "ext.conf")
def get_config_schema(self):
schema = super().get_config_schema()
schema["theme_type"] = config.String(optional=True, choices=["light", "dark"])
schema["background_color"] = ConfigColor(optional=True)
schema["text_color"] = ConfigColor(optional=True)
schema["primary_color"] = ConfigColor(optional=True)
schema["seek_update_interval"] = config.Integer()
schema["search_history_length"] = config.Integer()
schema["disable_dnd"] = config.Boolean()
schema["small_screen"] = config.Boolean()
schema["key_play_pause"] = config.String(optional=True)
schema["key_next_track"] = config.String(optional=True)
schema["key_previous_track"] = config.String(optional=True)
schema["key_rewind_track"] = config.String(optional=True)
schema["key_volume_up"] = config.String(optional=True)
schema["key_volume_down"] = config.String(optional=True)
return schema
def setup(self, registry):
logger.info("VERSION %s", self.version)
registry.add(
"http:app",
{
"name": self.ext_name,
"factory": self.factory
},
)
def factory(self, config, core):
path = pathlib.Path(__file__).parent / "static"
server_params = {
"path": path, "config": config, "mowecl_version": self.version
}
return [
(r"/(index.html)", server_params),
(r"/", FileServer, server_params),
(r"/(.*)", StaticFileHandler, {"path": path}),
]
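# Example mopidy.conf snippet for this extension (an illustrative sketch; the
# actual defaults live in ext.conf, the values below are made up):
#
#   [mowecl]
#   theme_type = dark
#   background_color = #282a36
#   text_color = #f8f8f2
#   primary_color = #bd93f9
#   seek_update_interval = 500
#   search_history_length = 10
#   disable_dnd = false
#   small_screen = false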
|
from kivy.uix.widget import Widget
class TicTacToeBoard(Widget):
pass
|
from astropy.io import ascii
import numpy as np
import matplotlib.pyplot as plt
from astropy.time import Time
from astropy.table import Table
photbl = ascii.read('phot.dat')
#------------------------------------------------------------
bandlist = []
for inim in photbl['obs']:
bandlist.append(inim.split('-')[5])
photbl['band'] = np.array(bandlist)
#------------------------------------------------------------
objlist = []
for inim in photbl['obs']:
objlist.append(inim.split('-')[2])
photbl['object'] = np.array(objlist)
datelist = []
for inim in photbl['obs']:
part = inim.split('-')
datelist.append('20'+part[3][0:2]+'-'+part[3][2:4]+'-'+part[3][4:6]+'T'+part[4][0:2]+':'+part[4][2:4]+':'+part[4][4:])
photbl['date-obs'] = np.array(datelist)
jdlist = []
for date in photbl['date-obs']:
jdlist.append(Time(date, format='isot', scale='utc').jd)
photbl['jd'] = np.array(jdlist)
#photbl['obs'] = 'UKIRT'
newtbl = Table()
newtbl['object'], newtbl['obs'], newtbl['band'], newtbl['date-obs'], newtbl['jd'], newtbl['seeing'], newtbl['mag'] = photbl['object'], photbl['obs'], photbl['band'], photbl['date-obs'], photbl['jd'], photbl['seeing'], photbl['mag']
newtbl.write('phot_ukirt.dat', format='ascii', overwrite=True)
#------------------------------------------------------------
photbl[ (photbl['band']=='R') & (photbl['seeing']<3.0) & (photbl['mag']>20.0)]
photbl[ (photbl['band']=='V') & (photbl['seeing']<4.0) & (photbl['mag']>19.0) ]
photbl[ (photbl['band']=='B') & (photbl['seeing']<2.5) & (photbl['mag']>20.6) ]
import os, glob
path_qso = '/data3/IMSNG/LOAO/gal'
path_gundam = '/mnt/window/Users/User/Downloads/data/loao/ref'
downcom = 'sshpass -prjseka23! scp -ro StrictHostKeyChecking=no paek@qso.snu.ac.kr:'
#reflist = ['NGC0488', 'NGC1309', 'UGC02855', 'NGC2207', 'NGC2993', 'IC2537', 'NGC3169', 'NGC3294', 'NGC3344', 'NGC3629', 'NGC3646', 'NGC3938', 'NGC4030', 'NGC4108']
#reflist = ['NGC3169', 'NGC3294', 'NGC3344', 'NGC3629', 'NGC3646', 'NGC3938']
#reflist = ['NGC0488', 'NGC1309', 'NGC2207', 'NGC2993', 'UGC02855', 'IC2537', 'NGC1385']
reflist = ['M95', 'M66', 'M98', 'M86', 'M87', 'M91', 'M90', 'M58', 'M64', 'M63']
for obj in reflist:
os.system('mkdir '+path_gundam+'/'+obj+'/')
com = downcom+path_qso+'/'+obj+'/C*.fits '+path_gundam+'/'+obj+'/'
print(com)
os.system(com)
|
import configparser
import os
from kakao.common import fill_str_with_space
def is_in_range(coord_type, coord, user_min_x=-180.0, user_max_y=90.0):
korea_coordinate = { # Republic of Korea coordinate
"min_x": 124.5,
"max_x": 132.0,
"min_y": 33.0,
"max_y": 38.9
}
try:
if coord_type == "x":
return max(korea_coordinate["min_x"], user_min_x) <= float(coord) <= korea_coordinate["max_x"]
elif coord_type == "y":
return korea_coordinate["min_y"] <= float(coord) <= min(korea_coordinate["max_y"], user_max_y)
else:
return False
except ValueError:
        # Guard against inputs that cannot be parsed as float
return False
# pylint: disable=too-many-branches
def input_config():
vaccine_candidates = [
{"name": "아무거나", "code": "ANY"},
{"name": "화이자", "code": "VEN00013"},
{"name": "모더나", "code": "VEN00014"},
{"name": "아스트라제네카", "code": "VEN00015"},
{"name": "얀센", "code": "VEN00016"},
{"name": "(미사용)", "code": "VEN00017"},
{"name": "(미사용)", "code": "VEN00018"},
{"name": "(미사용)", "code": "VEN00019"},
{"name": "(미사용)", "code": "VEN00020"},
]
vaccine_type = None
while True:
print("=== 백신 목록 ===")
for vaccine in vaccine_candidates:
if vaccine["name"] == "(미사용)":
continue
print(
f"{fill_str_with_space(vaccine['name'], 10)} : {vaccine['code']}")
vaccine_type = str.upper(input("예약시도할 백신 코드를 알려주세요: ").strip())
if any(x["code"] == vaccine_type for x in vaccine_candidates) or vaccine_type.startswith("FORCE:"):
if vaccine_type.startswith("FORCE:"):
vaccine_type = vaccine_type[6:]
print("경고: 강제 코드 입력모드를 사용하셨습니다.\n" +
"이 모드는 새로운 백신이 예약된 코드로 **등록되지 않은 경우에만** 사용해야 합니다.\n" +
"입력하신 코드가 정상적으로 작동하는 백신 코드인지 필히 확인해주세요.\n" +
f"현재 코드: '{vaccine_type}'\n")
if len(vaccine_type) != 8 or not vaccine_type.startswith("VEN") or not vaccine_type[3:].isdigit():
print("입력하신 코드가 현재 알려진 백신 코드 형식이랑 맞지 않습니다.")
proceed = str.lower(input("진행하시겠습니까? Y/N : "))
if proceed == "y":
pass
elif proceed == "n":
continue
else:
print("Y 또는 N을 입력해 주세요.")
continue
if next((x for x in vaccine_candidates if x["code"] == vaccine_type), {"name": ""})["name"] == "(미사용)":
print("현재 프로그램 버전에서 백신 이름이 등록되지 않은, 추후를 위해 미리 넣어둔 백신 코드입니다.\n" +
"입력하신 코드가 정상적으로 작동하는 백신 코드인지 필히 확인해주세요.\n" +
f"현재 코드: '{vaccine_type}'\n")
break
else:
print("백신 코드를 확인해주세요.")
print("사각형 모양으로 백신범위를 지정한 뒤, 해당 범위 안에 있는 백신을 조회해서 남은 백신이 있으면 해당 병원에 예약을 시도합니다.")
print("경위도는 구글 맵에서 원하는 위치를 우클릭하여 복사할 수 있습니다.")
top_x = None
top_y = None
while top_x is None or top_y is None:
top_y, top_x = input("사각형의 왼쪽 위 경위도를 넣어주세요. 37.28631662121671, 126.81741443463375: ").strip().split(",")
if not is_in_range(coord_type="x", coord=top_x) or not is_in_range(coord_type="y", coord=top_y):
print(f"올바른 좌표 값이 아닙니다. 입력 값 : {top_y}, {top_x}")
top_x = None
top_y = None
else:
top_x = top_x.strip()
top_y = top_y.strip()
bottom_x = None
bottom_y = None
while bottom_x is None or bottom_y is None:
bottom_y, bottom_x = input("사각형의 오른쪽 아래 경위도를 넣어주세요. 37.28631662121671, 126.81741443463375: ").strip().split(",")
if not is_in_range(coord_type="x", coord=bottom_x) or not is_in_range(coord_type="y", coord=bottom_y):
print(f"올바른 좌표 값이 아닙니다. 입력 값 : {bottom_y}, {bottom_x}")
bottom_x = None
bottom_y = None
else:
bottom_x = bottom_x.strip()
bottom_y = bottom_y.strip()
only_left = None
while only_left is None:
only_left = str.lower(input("남은 잔여백신이 있는 병원만 조회하시겠습니까? Y/N : "))
if only_left == "y":
only_left = True
elif only_left == "n":
only_left = False
else:
print("Y 또는 N을 입력해 주세요.")
only_left = None
dump_config(vaccine_type, top_x, top_y, bottom_x, bottom_y, only_left)
return vaccine_type, top_x, top_y, bottom_x, bottom_y, only_left
# Load previously entered values
def load_config():
config_parser = configparser.ConfigParser()
if os.path.exists('config.ini'):
try:
config_parser.read('config.ini')
while True:
skip_input = str.lower(input("기존에 입력한 정보로 재검색하시겠습니까? Y/N : "))
if skip_input == "y":
skip_input = True
break
elif skip_input == "n":
skip_input = False
break
else:
print("Y 또는 N을 입력해 주세요.")
if skip_input:
try:
                    # If the config file exists, load the most recently saved info
configuration = config_parser['config']
previous_used_type = configuration["VAC"]
previous_top_x = configuration["topX"]
previous_top_y = configuration["topY"]
previous_bottom_x = configuration["botX"]
previous_bottom_y = configuration["botY"]
previous_only_left = configuration["onlyLeft"] == "True"
return previous_used_type, previous_top_x, previous_top_y, previous_bottom_x, previous_bottom_y, previous_only_left
except KeyError:
print('기존에 입력한 정보가 없습니다.')
else:
return None, None, None, None, None, None
except ValueError:
return None, None, None, None, None, None
return None, None, None, None, None, None
# pylint: disable=too-many-arguments
def dump_config(vaccine_type, top_x, top_y, bottom_x, bottom_y, only_left, search_time=0.2):
config_parser = configparser.ConfigParser()
config_parser['config'] = {}
conf = config_parser['config']
conf['VAC'] = vaccine_type
conf["topX"] = top_x
conf["topY"] = top_y
conf["botX"] = bottom_x
conf["botY"] = bottom_y
conf["search_time"] = str(search_time)
conf["onlyLeft"] = "True" if only_left else "False"
with open("config.ini", "w") as config_file:
config_parser.write(config_file)
def load_search_time():
config_parser = configparser.ConfigParser()
search_time = 0.1
if os.path.exists('config.ini'):
config_parser.read('config.ini')
input_time = config_parser.getfloat('config', 'search_time', fallback=0.1)
if input_time < 0.1:
confirm_input = None
while confirm_input is None:
confirm_input = str.lower(input("과도하게 딜레이를 줄이면 계정 정지의 위험이 있습니다. 계속하시겠습니까? Y/N : "))
if confirm_input == "y":
search_time = input_time
elif confirm_input == "n":
print("검색 주기가 기본값 0.2로 설정되었습니다.")
else:
print("Y 또는 N을 입력해 주세요.")
confirm_input = None
else:
search_time = input_time
return search_time
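# For reference, a minimal sketch of the config.ini written by dump_config()
# above (the values are purely illustrative placeholders). configparser
# lower-cases option names when they are set, and the SectionProxy lookups in
# load_config() are case-insensitive, so keys such as "VAC" and "topX" still
# resolve:
#
#   [config]
#   vac = <vaccine_code>
#   topx = 126.81741443463375
#   topy = 37.28631662121671
#   botx = 126.84741443463375
#   boty = 37.25631662121671
#   search_time = 0.2
#   onlyleft = True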
|
# Copyright 2022 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for type_parsing.py."""
from absl.testing import absltest
from google3.third_party.mujoco.introspect import ast_nodes
from google3.third_party.mujoco.introspect import type_parsing
class TypeParsingTest(absltest.TestCase):
def test_parse_complex_type(self):
parsed_type = type_parsing.parse_type(
'int unsigned volatile long const long'+
'(**const(*const restrict*[9])[7])[3][4]')
expected_type = ast_nodes.ArrayType(
extents=[9],
inner_type=ast_nodes.PointerType(
ast_nodes.PointerType(
is_const=True,
is_restrict=True,
inner_type=ast_nodes.ArrayType(
extents=[7],
inner_type=ast_nodes.PointerType(
is_const=True,
inner_type=ast_nodes.PointerType(
ast_nodes.ArrayType(
extents=(3, 4),
inner_type=ast_nodes.ValueType(
'int unsigned long long',
is_const=True, is_volatile=True)
)
)
)
)
)
)
)
self.assertEqual(parsed_type, expected_type)
if __name__ == '__main__':
absltest.main()
|
import aiohttp
from aiomodrinth.common import BASE_URL
async def get(way: str, headers=None, **kwargs) -> aiohttp.ClientResponse:
async with aiohttp.ClientSession(headers=headers) as s:
resp = await s.get(url=f"{BASE_URL}{way}", **kwargs)
return resp
async def post(way: str, payload: dict | list, headers=None, **kwargs) -> aiohttp.ClientResponse:
async with aiohttp.ClientSession(headers=headers) as s:
resp = await s.post(url=f"{BASE_URL}{way}", data=payload, **kwargs)
return resp
async def patch(way: str, headers=None, **kwargs) -> aiohttp.ClientResponse:
async with aiohttp.ClientSession(headers=headers) as s:
resp = await s.patch(url=f"{BASE_URL}{way}", **kwargs)
return resp
async def delete(way: str, headers=None, **kwargs) -> aiohttp.ClientResponse:
async with aiohttp.ClientSession(headers=headers) as s:
resp = await s.delete(url=f"{BASE_URL}{way}", **kwargs)
return resp
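# A minimal usage sketch of the thin wrappers above. The "/project/fabric-api"
# path is an illustrative assumption, not a verified Modrinth route. Note that
# the ClientSession is already closed when the response object is returned, so
# only data parsed up-front (status, headers) is safely accessible here; code
# that needs the body should read it while the session is still open.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        resp = await get("/project/fabric-api")
        print(resp.status, resp.headers.get("Content-Type"))

    asyncio.run(_demo())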
|
import numpy as np
import pytest
import scipy.sparse as ss
import scipy.special as sp
import scipy.stats as st
import scmodes.ebpm.sgd
import torch
import torch.utils.data as td
from .fixtures import *
def test__nb_llik(simulate_gamma):
x, s, log_mu, log_phi, oracle_llik = simulate_gamma
llik = scmodes.ebpm.sgd._nb_llik(torch.tensor(x, dtype=torch.float),
torch.tensor(s, dtype=torch.float),
torch.tensor(log_mu, dtype=torch.float),
torch.tensor(-log_phi, dtype=torch.float)).sum()
assert np.isclose(llik, oracle_llik)
def test__zinb_llik(simulate_gamma):
x, s, log_mu, log_phi, oracle_llik = simulate_gamma
llik = scmodes.ebpm.sgd._zinb_llik(torch.tensor(x, dtype=torch.float),
torch.tensor(s, dtype=torch.float),
torch.tensor(log_mu, dtype=torch.float),
torch.tensor(-log_phi, dtype=torch.float),
torch.tensor(-100, dtype=torch.float)).sum()
assert np.isclose(llik, oracle_llik)
def test__zinb_llik_zinb_data(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, oracle_llik = simulate_point_gamma
n, p = x.shape
llik = scmodes.ebpm.sgd._zinb_llik(torch.tensor(x, dtype=torch.float),
torch.tensor(s, dtype=torch.float),
torch.tensor(log_mu, dtype=torch.float),
torch.tensor(-log_phi, dtype=torch.float),
torch.tensor(logodds, dtype=torch.float)).sum()
assert np.isclose(llik, oracle_llik)
def test_ebpm_gamma_batch(simulate_gamma):
x, s, log_mu, log_phi, l0 = simulate_gamma
n, p = x.shape
log_mu_hat, neg_log_phi_hat, l1 = scmodes.ebpm.sgd.ebpm_gamma(x, s, batch_size=n, max_epochs=2000)
assert np.isfinite(log_mu_hat).all()
assert np.isfinite(neg_log_phi_hat).all()
assert log_mu_hat.shape == (1, p)
assert neg_log_phi_hat.shape == (1, p)
assert l1 > l0
def test_ebpm_gamma_minibatch(simulate_gamma):
x, s, log_mu, log_phi, l0 = simulate_gamma
n, p = x.shape
log_mu_hat, neg_log_phi_hat, l1 = scmodes.ebpm.sgd.ebpm_gamma(x, s, batch_size=100, max_epochs=100)
assert np.isfinite(log_mu_hat).all()
assert np.isfinite(neg_log_phi_hat).all()
assert log_mu_hat.shape == (1, p)
assert neg_log_phi_hat.shape == (1, p)
assert l1 > l0
def test_ebpm_gamma_sgd(simulate_gamma):
x, s, log_mu, log_phi, l0 = simulate_gamma
n, p = x.shape
  # Important: the learning rate has to be lowered to compensate for the increased
# variance in gradient estimator
log_mu_hat, neg_log_phi_hat, l1 = scmodes.ebpm.sgd.ebpm_gamma(x, s, batch_size=1, max_epochs=10, lr=5e-3)
assert np.isfinite(log_mu_hat).all()
assert np.isfinite(neg_log_phi_hat).all()
assert log_mu_hat.shape == (1, p)
assert neg_log_phi_hat.shape == (1, p)
assert l1 > l0
def test_ebpm_gamma_trace(simulate_gamma):
x, s, log_mu, log_phi, l0 = simulate_gamma
n, p = x.shape
max_epochs = 5
log_mu_hat, neg_log_phi_hat, l1, trace = scmodes.ebpm.sgd.ebpm_gamma(x[:,0].reshape(-1, 1), s, batch_size=n, max_epochs=max_epochs, trace=True)
assert len(trace) == max_epochs
def test_ebpm_point_gamma_oracle_init(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
n, p = x.shape
log_mu_hat, neg_log_phi_hat, logodds_hat, l1 = scmodes.ebpm.sgd.ebpm_point_gamma(x, s, init=(log_mu, -log_phi), batch_size=n, max_epochs=2000)
assert np.isfinite(log_mu_hat).all()
assert np.isfinite(neg_log_phi_hat).all()
assert np.isfinite(logodds_hat).all()
assert log_mu_hat.shape == (1, p)
assert neg_log_phi_hat.shape == (1, p)
assert logodds_hat.shape == (1, p)
assert l1 > l0
def test_ebpm_point_gamma_batch(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
n, p = x.shape
log_mu_hat, neg_log_phi_hat, logodds_hat, l1 = scmodes.ebpm.sgd.ebpm_point_gamma(x, s, batch_size=n, max_epochs=2000)
assert np.isfinite(log_mu_hat).all()
assert np.isfinite(neg_log_phi_hat).all()
assert np.isfinite(logodds_hat).all()
assert log_mu_hat.shape == (1, p)
assert neg_log_phi_hat.shape == (1, p)
assert logodds_hat.shape == (1, p)
assert l1 > l0
def test_ebpm_point_gamma_sparse(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
y = ss.csr_matrix(x)
n, p = x.shape
log_mu_hat, neg_log_phi_hat, logodds_hat, l1 = scmodes.ebpm.sgd.ebpm_point_gamma(y, batch_size=100, max_epochs=100)
assert np.isfinite(log_mu_hat).all()
assert np.isfinite(neg_log_phi_hat).all()
assert np.isfinite(logodds_hat).all()
assert log_mu_hat.shape == (1, p)
assert neg_log_phi_hat.shape == (1, p)
assert logodds_hat.shape == (1, p)
assert l1 > l0
def test_ebpm_point_gamma_minibatch(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
n, p = x.shape
log_mu_hat, neg_log_phi_hat, logodds_hat, l1 = scmodes.ebpm.sgd.ebpm_point_gamma(x, s, batch_size=100, max_epochs=100)
assert np.isfinite(log_mu_hat).all()
assert np.isfinite(neg_log_phi_hat).all()
assert np.isfinite(logodds_hat).all()
assert log_mu_hat.shape == (1, p)
assert neg_log_phi_hat.shape == (1, p)
assert logodds_hat.shape == (1, p)
assert l1 > l0
def test_ebpm_point_gamma_sgd(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
n, p = x.shape
  # Important: the learning rate has to be lowered to compensate for the increased
# variance in gradient estimator
log_mu_hat, neg_log_phi_hat, logodds_hat, l1 = scmodes.ebpm.sgd.ebpm_point_gamma(x, s, batch_size=1, max_epochs=10, lr=5e-3)
assert np.isfinite(log_mu_hat).all()
assert np.isfinite(neg_log_phi_hat).all()
assert np.isfinite(logodds_hat).all()
assert log_mu_hat.shape == (1, p)
assert neg_log_phi_hat.shape == (1, p)
assert logodds_hat.shape == (1, p)
assert l1 > l0
def test_ebpm_point_gamma_trace(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
n, p = x.shape
max_epochs = 2000
log_mu_hat, neg_log_phi_hat, logodds_hat, l1, trace = scmodes.ebpm.sgd.ebpm_point_gamma(x[:,0].reshape(-1, 1), s, batch_size=n, max_epochs=max_epochs, trace=True)
assert len(trace) == max_epochs
def test_EBPMDataset_init(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
y = ss.csr_matrix(x)
data = scmodes.ebpm.sgd.EBPMDataset(y, s)
assert len(data) == y.shape[0]
if torch.cuda.is_available():
assert (data.data.cpu().numpy() == y.data).all()
assert (data.indices.cpu().numpy() == y.indices).all()
assert (data.indptr.cpu().numpy() == y.indptr).all()
else:
assert (data.data.numpy() == y.data).all()
assert (data.indices.numpy() == y.indices).all()
assert (data.indptr.numpy() == y.indptr).all()
def test_EBPMDataset_init_dense(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
data = scmodes.ebpm.sgd.EBPMDataset(x, s)
if torch.cuda.is_available():
y = ss.csr_matrix((data.data.cpu().numpy(), data.indices.cpu().numpy(), data.indptr.cpu().numpy()))
else:
y = ss.csr_matrix((data.data.numpy(), data.indices.numpy(), data.indptr.numpy()))
assert (y.todense() == x).all()
def test_EBPMDataset_init_coo(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
y = ss.coo_matrix(x)
data = scmodes.ebpm.sgd.EBPMDataset(y, s)
if torch.cuda.is_available():
z = ss.csr_matrix((data.data.cpu().numpy(), data.indices.cpu().numpy(), data.indptr.cpu().numpy())).tocoo()
else:
z = ss.csr_matrix((data.data.numpy(), data.indices.numpy(), data.indptr.numpy())).tocoo()
# This is more efficient than ==
assert not (y != z).todense().any()
def test_EBPMDataset__get_item__(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
data = scmodes.ebpm.sgd.EBPMDataset(x, s)
y = data[0]
assert y == 0
def test_EBPMDataset_collate_fn(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
data = scmodes.ebpm.sgd.EBPMDataset(x, s)
batch_size = 10
y, t = data.collate_fn(range(batch_size))
if torch.cuda.is_available():
assert (y.cpu().numpy() == x[:batch_size]).all()
assert (t.cpu().numpy() == s[:batch_size]).all()
else:
assert (y.numpy() == x[:batch_size]).all()
assert (t.numpy() == s[:batch_size]).all()
def test_EBPMDataset_collate_fn_shuffle(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
data = scmodes.ebpm.sgd.EBPMDataset(x, s)
idx = [10, 20, 30, 40, 50]
y, t = data.collate_fn(idx)
if torch.cuda.is_available():
assert (y.cpu().numpy() == x[idx]).all()
assert (t.cpu().numpy() == s[idx]).all()
else:
assert (y.numpy() == x[idx]).all()
assert (t.numpy() == s[idx]).all()
def test_EBPMDataset_DataLoader(simulate_point_gamma):
x, s, log_mu, log_phi, logodds, l0 = simulate_point_gamma
batch_size = 10
sparse_data = scmodes.ebpm.sgd.EBPMDataset(x, s)
data = td.DataLoader(sparse_data, batch_size=batch_size, shuffle=False, collate_fn=sparse_data.collate_fn)
y, t = next(iter(data))
if torch.cuda.is_available():
assert (y.cpu().numpy() == x[:batch_size]).all()
assert (t.cpu().numpy() == s[:batch_size]).all()
else:
assert (y.numpy() == x[:batch_size]).all()
assert (t.numpy() == s[:batch_size]).all()
|
import yaml as _yaml
import pathlib as _pathlib
from importlib import import_module
from opics.libraries.catalogue_mgmt import download_library, remove_library
import sys
_curr_dir = _pathlib.Path(__file__).parent.resolve()
# read yaml file for available libraries in the catalogue
with open(_curr_dir / "catalogue.yaml", "r") as _stream:
library_catalogue = _yaml.safe_load(_stream)
def _import_external_libraries(library_catalogue):
installed_libraries = []
for each_lib in library_catalogue.keys():
if library_catalogue[each_lib]["installed"]:
sys.path.append(f"{library_catalogue[each_lib]['library_path']}")
installed_libraries.append(each_lib)
globals()[each_lib] = import_module(each_lib)
return installed_libraries
installed_libraries = _import_external_libraries(library_catalogue)
__all__ = [
"download_library",
"remove_library",
]
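# A minimal usage sketch, assuming a component library named "ebeam" has
# already been downloaded and marked as installed in catalogue.yaml (the
# library name is purely illustrative):
#
#   import opics.libraries as libraries
#   print(libraries.installed_libraries)   # e.g. ['ebeam']
#   ebeam = libraries.ebeam                # bound above via import_module()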
|
from .schema import Schema, RootSchema
|
"""
Given an integer array nums, find the sum of the elements between indices i and j (i ≤ j), inclusive.
Example:
Given nums = [-2, 0, 3, -5, 2, -1]
sumRange(0, 2) -> 1
sumRange(2, 5) -> -1
sumRange(0, 5) -> -3
Note:
You may assume that the array does not change.
There are many calls to sumRange function.
"""
class NumArray(object):
def __init__(self, nums):
"""
initialize your data structure here.
:type nums: List[int]
"""
self.size=len(nums)
self.sums=[0]*(self.size+1)
for i in range(self.size):
self.sums[i+1]=self.sums[i]+nums[i]
def sumRange(self, i, j):
"""
sum of elements nums[i..j], inclusive.
:type i: int
:type j: int
:rtype: int
"""
return self.sums[j+1]-self.sums[i]
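# Quick self-check mirroring the example in the docstring above; each query is
# answered in O(1) from the precomputed prefix sums.
if __name__ == "__main__":
    arr = NumArray([-2, 0, 3, -5, 2, -1])
    assert arr.sumRange(0, 2) == 1
    assert arr.sumRange(2, 5) == -1
    assert arr.sumRange(0, 5) == -3
    print("prefix sums:", arr.sums)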
|
from colorthief import analyse_image
def test_analyse_image_not_fancy(capsys):
imgpath = 'images/sunset.jpg'
analyse_image(imgpath, do_print_fancy=False)
expected_stdout = """(163, 143, 178)
(9, 6, 5)
(99, 35, 32)
(246, 222, 171)
(151, 82, 64)
"""
captured = capsys.readouterr()
assert captured.out == expected_stdout
|
# Write a function that reverses a string. The input string is given as an array of characters s.
# You must do this by modifying the input array in-place with O(1) extra memory.
# Example 1:
# Input: s = ["h","e","l","l","o"]
# Output: ["o","l","l","e","h"]
# Example 2:
# Input: s = ["H","a","n","n","a","h"]
# Output: ["h","a","n","n","a","H"]
class Solution(object):
def reverseString(self, s):
"""
:type s: List[str]
:rtype: None Do not return anything, modify s in-place instead.
"""
i, j = 0, len(s) - 1
while i < j:
s[i], s[j] = s[j], s[i]
i += 1
j -= 1
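# Small demonstration using the two examples from the problem statement above;
# the list is reversed in place and nothing is returned.
if __name__ == "__main__":
    chars = ["h", "e", "l", "l", "o"]
    Solution().reverseString(chars)
    assert chars == ["o", "l", "l", "e", "h"]
    chars = ["H", "a", "n", "n", "a", "h"]
    Solution().reverseString(chars)
    assert chars == ["h", "a", "n", "n", "a", "H"]
    print("in-place reversal OK")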
|
import enum
class Locale(enum.Enum):
ENGLISH_AU = "en-AU"
ENGLISH_CA = "en-CA"
ENGLISH_GB = "en-GB"
ENGLISH_IN = "en-IN"
ENGLISH_US = "en-US"
ENGLISH_ES = "en-ES"
GERMAN = "de-DE"
SPANISH_ES = "es-ES"
SPANISH_MX = "es-MX"
FRENCH_CA = "fr-CA"
FRENCH_FR = "fr-FR"
ITALIAN = "it-IT"
JAPANESE = "ja-JP"
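# Members can be looked up by their locale-tag value, e.g. when parsing a code
# received from an API response; this is standard Enum behaviour.
if __name__ == "__main__":
    assert Locale("en-GB") is Locale.ENGLISH_GB
    assert Locale.JAPANESE.value == "ja-JP"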
|
"""
Conditional
===========
Rules can be applied conditionally using the `If` statement. These will make
more sense when you can define variables.
Examples::
If $dosub {
Substitute a -> b;
}
"""
import fontFeatures
from .util import compare
from . import FEZVerb
PARSEOPTS = dict(use_helpers=True)
GRAMMAR = """
boolean_condition: comparison | (boolean_term | not_boolean_term)
boolean_term: integer_container COMPARATOR integer_container | integer_container
not_boolean_term: "not" boolean_term
comparison: (boolean_term | not_boolean_term) AND_OR (boolean_term | not_boolean_term)
AND_OR: ("&" | "|")
"""
If_GRAMMAR = """
?start: action
action: boolean_condition "{" statement+ "}"
"""
If_beforebrace_GRAMMAR = """
?start: beforebrace
beforebrace: boolean_condition
"""
VERBS = ["If"]
class If(FEZVerb):
def __init__(self, parser):
self.parser = parser
def comparison(self, args):
(l, comparator, r) = args
if comparator.value == "&":
return l and r
elif comparator.value == "|":
return l or r
else:
raise ValueError("Unrecognized comparator")
def boolean_term(self, args):
if len(args) == 1:
return bool(args[0].resolve_as_bool())
(l, comparator, r) = args
return compare(l.resolve_as_integer(), comparator, r.resolve_as_integer())
def boolean_condition(self, args):
return args[0]
def not_boolean_term(self, args):
(boolean_term,) = args
return not boolean_term
def action(self, args):
(boolean, statements, _) = args
boolean = self.parser.expand_statements(boolean)
if bool(boolean[0]):
return self.parser.expand_statements(statements)
else:
return []
|
import numpy as np
import matplotlib.pyplot as plt
from mlxtend.plotting import plot_decision_regions
from sklearn import svm
import os.path as p
current_dir = p.dirname(p.realpath(__file__))
# 2x500 matrix. value is random
matrix = np.random.randn(500, 2)
"""
calculate xor by each row, and get 1 or -1 convert from True or False
[X1 xor Y1, X2 xor Y2, ..., Xn xor Yn]
#=> ex.) [1, -1, ..., 1]
"""
xor = np.logical_xor(matrix[:, 0] > 0, matrix[:, 1] > 0)
xor = np.where(xor, 1, -1)
x1 = matrix[xor == 1, 0]
y1 = matrix[xor == 1, 1]
plt.scatter(x1, y1, c='b', marker='x', label='1')
x2 = matrix[xor == -1, 0]
y2 = matrix[xor == -1, 1]
plt.scatter(x2, y2, c='r', marker='x', label='-1')
plt.legend(loc='upper left')
plt.savefig(p.join(current_dir, './svm_non_linear_xor.png'))
plt.cla()
# non linear svm
X = matrix
y = xor
clf = svm.SVC(kernel='rbf', random_state=1, gamma=0.10, C=10.0)
clf.fit(X, y)
plot_decision_regions(X, y, clf=clf)
plt.legend(loc='upper left')
plt.tight_layout()
plt.savefig(p.join(current_dir, './svm_non_linear_xor_with_decision_regions.png'))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Python -*-
"""
@file RSNPUnitConnector.py
@brief send data to Unit and receive data from Unit
@date $Date$
"""
import sys
import time
import json
sys.path.append(".")
# Import RTM module
import RTC
import OpenRTM_aist
import MQTTClient
# Import Service implementation class
# <rtc-template block="service_impl">
# </rtc-template>
# Import Service stub modules
# <rtc-template block="consumer_import">
# </rtc-template>
# This module's specification
# <rtc-template block="module_spec">
rsnpunitconnector_spec = ["implementation_id", "RSNPUnitConnector",
"type_name", "RSNPUnitConnector",
"description", "send data to Unit and recieve data from Unit",
"version", "1.0.0",
"vendor", "KoichiroKato",
"category", "Category",
"activity_type", "STATIC",
"max_instance", "1",
"language", "Python",
"lang_type", "SCRIPT",
"conf.default.UnitHostname", "rsnpunit",
"conf.__widget__.UnitHostname", "text",
"conf.__type__.UnitHostname", "string",
""]
# </rtc-template>
##
# @class RSNPUnitConnector
# @brief send data to Unit and receive data from Unit
#
#
class RSNPUnitConnector(OpenRTM_aist.DataFlowComponentBase):
##
# @brief constructor
# @param manager Manager Object
#
def __init__(self, manager):
OpenRTM_aist.DataFlowComponentBase.__init__(self, manager)
self._d_stringIn = RTC.TimedString(RTC.Time(0,0), "")
"""
"""
self._stringInIn = OpenRTM_aist.InPort("stringIn", self._d_stringIn)
self._d_robotPose2DIn = RTC.TimedPose2D(RTC.Time(0,0), RTC.Pose2D(RTC.Point2D(0.0,0.0), 0.0))
"""
"""
self._robotPose2DInIn = OpenRTM_aist.InPort("robotPose2DIn", self._d_robotPose2DIn)
self._d_countIn = RTC.TimedShort(RTC.Time(0,0), 0)
"""
"""
self._countInIn = OpenRTM_aist.InPort("countIn", self._d_countIn)
self._d_velocityOut = RTC.TimedVelocity2D(RTC.Time(0,0), RTC.Velocity2D(0.0,0.0,0.0))
"""
"""
self._velocityOutOut = OpenRTM_aist.OutPort("velocityOut", self._d_velocityOut)
# initialize of configuration-data.
# <rtc-template block="init_conf_param">
"""
- Name: UnitHostname
- DefaultValue: rsnpunit
"""
self._UnitHostname = ['rsnpunit']
# </rtc-template>
##
#
# The initialize action (on CREATED->ALIVE transition)
# former rtc_init_entry()
#
# @return RTC::ReturnCode_t
#
#
def onInitialize(self):
# Bind variables and configuration variable
self.bindParameter("UnitHostname", self._UnitHostname, "rsnpunit")
# Set InPort buffers
self.addInPort("stringIn",self._stringInIn)
self.addInPort("robotPose2DIn",self._robotPose2DInIn)
self.addInPort("countIn",self._countInIn)
# Set OutPort buffers
self.addOutPort("velocityOut",self._velocityOutOut)
# Set service provider to Ports
# Set service consumers to Ports
# Set CORBA Service Ports
return RTC.RTC_OK
###
##
## The finalize action (on ALIVE->END transition)
## former rtc_exiting_entry()
##
## @return RTC::ReturnCode_t
#
##
#def onFinalize(self):
#
# return RTC.RTC_OK
###
##
## The startup action when ExecutionContext startup
## former rtc_starting_entry()
##
## @param ec_id target ExecutionContext Id
##
## @return RTC::ReturnCode_t
##
##
#def onStartup(self, ec_id):
#
# return RTC.RTC_OK
###
##
## The shutdown action when ExecutionContext stop
## former rtc_stopping_entry()
##
## @param ec_id target ExecutionContext Id
##
## @return RTC::ReturnCode_t
##
##
#def onShutdown(self, ec_id):
#
# return RTC.RTC_OK
##
#
# The activated action (Active state entry action)
# former rtc_active_entry()
#
# @param ec_id target ExecutionContext Id
#
# @return RTC::ReturnCode_t
#
#
def onActivated(self, ec_id):
print("Activate and connect unit")
self.mqttc = MQTTClient.MyMQTTClass()
self.mqttc.run(self._UnitHostname[0], "fromServer/Velocity")
return RTC.RTC_OK
##
#
# The deactivated action (Active state exit action)
# former rtc_active_exit()
#
# @param ec_id target ExecutionContext Id
#
# @return RTC::ReturnCode_t
#
#
def onDeactivated(self, ec_id):
print("Deactivate")
return RTC.RTC_OK
##
#
# The execution action that is invoked periodically
# former rtc_active_do()
#
# @param ec_id target ExecutionContext Id
#
# @return RTC::ReturnCode_t
#
#
def onExecute(self, ec_id):
# MQTT subscribe data
# data format : {"robotID":"", "vx":"", "va":"", "option":"", "timestamp":""}
if self.mqttc.isNew():
if self.mqttc.recieve_data=="NoData":
self._d_velocityOut.data.vx = 0
self._d_velocityOut.data.va = 0
self._d_velocityOut.data.vy = 0
self._velocityOutOut.write()
else:
print(self.mqttc.recieve_data)
self.velocity = json.loads(self.mqttc.recieve_data)
vx = float(self.velocity["vx"]) / 1500
va = float(self.velocity["va"]) / 1500
self._d_velocityOut.data.vx = vx
self._d_velocityOut.data.va = va
self._d_velocityOut.data.vy = 0
self._velocityOutOut.write()
# MQTT publish data
send_data_dict = {"data_type":"","data":""}
if self._stringInIn.isNew():
self._stringInIn.read()
send_data_dict["data_type"] = "other"
send_data_dict["data"] = self._d_stringIn.data
send_data_json = json.dumps(send_data_dict)
self.mqttc.publish_message(self._UnitHostname[0], "toUnit/Robotdata", send_data_json)
if self._robotPose2DInIn.isNew():
self._robotPose2DInIn.read()
odometry_x = self._d_robotPose2DIn.data.position.x
odometry_y = self._d_robotPose2DIn.data.position.y
odometry_h = self._d_robotPose2DIn.data.heading
odometry_str = "x:"+str(odometry_x)+"y:"+str(odometry_y)+"heading:"+str(odometry_h)
send_data_dict["data_type"] = "odometry"
send_data_dict["data"] = odometry_str
send_data_json = json.dumps(send_data_dict)
self.mqttc.publish_message(self._UnitHostname[0], "toUnit/Robotdata", send_data_json)
if self._countInIn.isNew():
self._countInIn.read()
send_data_dict["data_type"] = "count"
send_data_dict["data"] = self._d_countIn.data
send_data_json = json.dumps(send_data_dict)
self.mqttc.publish_message(self._UnitHostname[0], "toUnit/Robotdata", send_data_json)
return RTC.RTC_OK
###
##
## The aborting action when main logic error occurred.
## former rtc_aborting_entry()
##
## @param ec_id target ExecutionContext Id
##
## @return RTC::ReturnCode_t
##
##
#def onAborting(self, ec_id):
#
# return RTC.RTC_OK
###
##
## The error action in ERROR state
## former rtc_error_do()
##
## @param ec_id target ExecutionContext Id
##
## @return RTC::ReturnCode_t
##
##
#def onError(self, ec_id):
#
# return RTC.RTC_OK
###
##
## The reset action that is invoked resetting
## This is same but different the former rtc_init_entry()
##
## @param ec_id target ExecutionContext Id
##
## @return RTC::ReturnCode_t
##
##
#def onReset(self, ec_id):
#
# return RTC.RTC_OK
###
##
## The state update action that is invoked after onExecute() action
## no corresponding operation exists in OpenRTm-aist-0.2.0
##
## @param ec_id target ExecutionContext Id
##
## @return RTC::ReturnCode_t
##
##
#def onStateUpdate(self, ec_id):
#
# return RTC.RTC_OK
###
##
## The action that is invoked when execution context's rate is changed
## no corresponding operation exists in OpenRTm-aist-0.2.0
##
## @param ec_id target ExecutionContext Id
##
## @return RTC::ReturnCode_t
##
##
#def onRateChanged(self, ec_id):
#
# return RTC.RTC_OK
def RSNPUnitConnectorInit(manager):
profile = OpenRTM_aist.Properties(defaults_str=rsnpunitconnector_spec)
manager.registerFactory(profile,
RSNPUnitConnector,
OpenRTM_aist.Delete)
def MyModuleInit(manager):
RSNPUnitConnectorInit(manager)
# Create a component
comp = manager.createComponent("RSNPUnitConnector")
def main():
mgr = OpenRTM_aist.Manager.init(sys.argv)
mgr.setModuleInitProc(MyModuleInit)
mgr.activateManager()
mgr.runManager()
if __name__ == "__main__":
main()
|
#
# Copyright (c) 2020 Expert System Iberia
#
"""Credibility reviewer for a "query" sentence. I.e. a sentence that
may not be in the co-inform database yet. Produces a `QSentCredReview`
This assessment is done based on a reference credibility review for a
sentence in the co-inform DB, along with a similarity review between
the query sentence and the reference sentence.
"""
from acred.reviewer.credibility import dbsent_credrev
from acred.reviewer.similarity import label as simlabel
from acred.reviewer.similarity import aggsent_simreviewer
from acred.reviewer.credibility import label as credlabel
from acred.rating import agg
from acred import content
from esiutils import isodate, bot_describer, citimings, dictu, hashu
ci_context = 'http://coinform.eu'
version = '0.1.0'
content.register_acred_type('QSentCredReviewer', {
'super_types': ['SoftwareApplication', 'Bot'],
'ident_keys': ['@type', 'name', 'dateCreated', 'softwareVersion', 'isBasedOn', 'launchConfiguration'],
'route_template': '/bot/{@type}/{softwareVersion}/{identifier}',
'itemref_keys': ['isBasedOn']
})
content.register_acred_type('QSentCredReview', {
'super_types': ['CredibilityReview', 'Review'],
'ident_keys': ['@type', 'dateCreated', 'author', 'itemReviewed', 'reviewRating', 'isBasedOn'],
'route_template': '/review/{identifier}',
'itemref_keys': ['author', 'itemReviewed', 'reviewRating', 'isBasedOn']
})
def review(item, based_on, config):
"""Reviews the incoming item and returns a Review for it
:param item: a single item or a list of items, in this case the
items must be `Sentence` instances.
:param based_on: list of zero or more relevant reviews that may
be required by this reviewer to perform the review of the `item`.
For qsent_credrev, this must contain both a SentenceSimilarityReview
and a DBSentCredReview.
:param config: a configuration map
:returns: one or more Review objects for the input items
:rtype: dict or list of dict
"""
raise NotImplementedError()
def default_sub_bots(cfg):
return [dbsent_credrev.default_bot_info(cfg),
aggsent_simreviewer.default_bot_info(cfg)]
def bot_info(sub_bots, cfg):
result = {
'@context': ci_context,
'@type': 'QSentCredReviewer',
'name': 'ESI Query Sentence Credibility Reviewer',
'description': 'Estimates the credibility of a sentence based on its polar similarity with a sentence in the Co-inform database for which a credibility can be estimated',
'additionalType': content.super_types('QSentCredReviewer'),
'author': bot_describer.esiLab_organization(),
'softwareVersion': version,
'dateCreated': '2020-03-27T22:54:00Z',
'url': 'http://coinform.eu/bot/QSentenceCredReviewer/%s' % version,
'applicationSuite': 'Co-inform',
'isBasedOn': sub_bots,
'launchConfiguration': {}
}
ident = hashu.hash_dict(dictu.select_keys(
result, content.ident_keys(result)))
return {
**result,
'identifier': ident
}
def default_bot_info(cfg):
return bot_info(default_sub_bots(cfg), cfg)
def similarSent_as_QSentCredReview(simSent, claimSimResult, cfg):
"""Converts a `SimilarSent` into a `QSentCredReview`
This review is done based on a reference credibility review
for a sentence in the co-inform DB, along with a similarity review
between the query sentence and the reference sentence.
:param simSent: a `SimilarSent` dict
:param claimSimResult: the `SemanticClaimSimilarityResult` containing `simSent`
:param cfg: configuration options
:returns: a `QSentCredReview`
:rtype: dict
"""
aggqsent_simreview = aggsent_simreviewer.similarSent_as_SentPolarSimilarityReview(
simSent, claimSimResult, cfg)
dbSent_credreview = dbsent_credrev.similarSent_as_DBSentCredRev(simSent, cfg)
return aggregate_subReviews(aggqsent_simreview, dbSent_credreview, cfg)
def aggregate_subReviews(aggqsent_simreview, dbSent_credreview, cfg):
"""Combines a polar similarity review and a dbSent credReview into a `QSentCredReview`
:param aggqsent_simreview: a `SentPolarSimilarityReview` dict
describing the polar similarity between the query and the db
sentences
:param dbSent_credreview: a `DBSentCredReview` for the db sentence
:param cfg: configuration options
:returns: a `QSentCredReview` that provides an aggregate
credibility review and rating for the query sentence
:rtype: dict
"""
agg_start = citimings.start()
dbSent_credval = dictu.get_in(dbSent_credreview, ['reviewRating', 'ratingValue'])
assert dbSent_credval >= -1.0 and dbSent_credval <= 1.0
dbSent = dictu.get_in(dbSent_credreview, ['itemReviewed', 'text'])
agg_sim = dictu.get_in(aggqsent_simreview, ['reviewRating', 'ratingValue'])
qSent = dictu.get_in(aggqsent_simreview, ['itemReviewed', 'sentA', 'text'])
dbSent2 = dictu.get_in(aggqsent_simreview, ['itemReviewed', 'sentB', 'text'])
assert dbSent == dbSent2, '%s != %s' % (dbSent, dbSent2)
agg_cred_conf = dictu.get_in(dbSent_credreview, ['reviewRating', 'confidence']) * abs(agg_sim)
assert agg_cred_conf >= 0.0 and agg_cred_conf <= 1.0, agg_cred_conf
sim_polarity = -1 if agg_sim < 0 else 1
isBasedOn = [aggqsent_simreview, dbSent_credreview] # subReviews
subRatings = [rev.get('reviewRating')
for rev in isBasedOn
if rev.get('reviewRating') is not None]
sub_bots = [] # TODO: extract author of subRatings?, directly request bot_info of deps?
# the sentence ...
explanation = '*%s*:\n\n * `%s`\nthat seems *%s* %s' % (
aggqsent_simreview.get('headline', ' '), # the polar relation between qsent and dbsent
dbSent,
credlabel.rating_label(dbSent_credreview['reviewRating'], cfg),
#credlabel.describe_credval(dbSent_credval, cred_dict=dbSent_credreview), # TODO: remove
dictu.get_in(dbSent_credreview, ['reviewRating', 'ratingExplanation']))
revRating = {
'@context': ci_context,
'@type': 'AggregateRating',
'additionalType': ['Rating'],
'reviewAspect': 'credibility',
'reviewCount': agg.total_reviewCount(subRatings) + len(isBasedOn),
'ratingCount': agg.total_ratingCount(subRatings),
'ratingValue': sim_polarity * dbSent_credval,
'confidence': agg_cred_conf,
'ratingExplanation': explanation
}
return {
'@context': ci_context,
'@type': 'QSentCredReview',
'additionalType': content.super_types('QSentCredReview'),
'itemReviewed': content.as_sentence(qSent, cfg=cfg),
'text': 'Sentence `%s` seems *%s* as it %s' % (
qSent, credlabel.rating_label(revRating, cfg),
explanation),
'dateCreated': isodate.now_utc_timestamp(),
'author': bot_info(sub_bots, cfg),
'reviewAspect': 'credibility',
'reviewRating': revRating,
'isBasedOn': isBasedOn
}
def ensure_credibility(relsents, cfg={}):
"""Add a `similarity_credibility` field to input relsents
It does this by combining the domain credibility, claimReview and
possibly stance detection results.
**Deprecated**: use similarSent_as_QSentCredReview and/or
aggregate_subReviews
:param relsents: list of or a single SimilarSent dict. You should
have already performed claimReview credibility rating
normalisation. See `enhance_relsent`.
:returns: input SimilarSent with additional credibility field
:rtype: dict
"""
if type(relsents) == list:
return [ensure_credibility(rs, cfg=cfg) for rs in relsents]
assert type(relsents) == dict
relsent = relsents # single relsent
assert 'similarity' in relsent
sim = relsent.get('similarity', 0.5)
assert sim >= 0.0 and sim <= 1.0
top_cred = dbsent_credrev.select_top_relsent_cred(relsent)
top_credval = top_cred.get('value', 0.0)
top_conf = top_cred.get('confidence', 0.0)
assert top_conf >= 0.0 and top_conf <= 1.0
# doc_stance = relsent.get('doc_stance', None)
# doc_stance_conf = relsent.get('doc_stance_confidence', 0.0)
sent_stance = relsent.get('sent_stance', None)
sent_stance_conf = relsent.get('sent_stance_confidence', 0.0)
polarity = -1 if sent_stance == 'disagree' else 1
agg_sim = aggsent_simreviewer.calc_agg_polarsim(sim, sent_stance, sent_stance_conf, cfg)
agg_conf = top_conf * abs(agg_sim)
explanation = 'Claim *%s*:\n\n * %s\nthat %s. %s' % (
simlabel.claim_rel_str(sim, sent_stance),
relsent.get(
'sentence',
"missing sentence (keys %s)" % (
list(relsent.keys()))),
credlabel.describe_credval(top_credval, cred_dict=top_cred),
top_cred.get('explanation', ''))
relsent['similarity_credibility'] = { # MUTATE input!!
'value': top_credval * polarity,
'confidence': agg_conf,
'explanation': explanation
}
assert agg_conf >= 0.0 and agg_conf <= 1.0, agg_conf
assert top_credval >= -1.0 and top_credval <= 1.0
return relsent
|
from typing import Dict
from core.coordinate import Coordinate
class Edge:
destination: 'Node'
source: 'Node'
def __init__(self, source: 'Node', destination: 'Node'):
self.destination = destination
self.source = source
def __hash__(self):
return hash(f"{self.destination}_{self.source}")
class Node:
out_edges: Dict['Node', Edge]
in_edges: Dict['Node', Edge]
def __init__(self, node_id: int, lng: float, lat: float):
self.node_id = node_id
self.location = Coordinate(lng, lat)
self.out_edges = {}
self.in_edges = {}
def add_out_edge(self, node: 'Node', edge: Edge):
    if node not in self.out_edges: self.out_edges[node] = edge
def add_in_edge(self, node: 'Node', edge: Edge):
    if node not in self.in_edges: self.in_edges[node] = edge
def __hash__(self):
return hash(self.node_id)
def __repr__(self):
return str(self.node_id)
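# A minimal sketch of wiring two nodes together with a directed edge; the node
# ids and coordinates are arbitrary illustrative values. Node `a` records the
# edge among its outgoing edges and node `b` among its incoming edges.
if __name__ == "__main__":
    a = Node(1, lng=126.97, lat=37.56)
    b = Node(2, lng=127.02, lat=37.50)
    edge = Edge(source=a, destination=b)
    a.add_out_edge(b, edge)
    b.add_in_edge(a, edge)
    print(a.out_edges, b.in_edges)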
|
from .dialogue import *
|
##
# See the file COPYRIGHT for copyright information.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Tests for :mod:`ranger-ims-server.store`
"""
from abc import ABC, abstractmethod
from datetime import datetime as DateTime
from datetime import timedelta as TimeDelta
from typing import ClassVar, Optional, Sequence
from attr import attrs
from ims.ext.trial import AsynchronousTestCase
from ims.model import Event, Incident, IncidentReport, ReportEntry
from .._abc import IMSDataStore
__all__ = ()
@attrs(frozen=True, auto_attribs=True, kw_only=True)
class TestDataStoreMixIn(ABC):
"""
:class:`IMSDataStore` mix-in for testing.
"""
maxIncidentNumber: ClassVar[int] = 2 ** 63 - 1 # Default to 64-bit int
exceptionClass: ClassVar[type] = Exception
exceptionMessage: ClassVar[str] = "I'm broken, yo"
@abstractmethod
def bringThePain(self) -> None:
"""
Raise exceptions on future DB queries.
"""
def raiseException(self) -> None:
"""
Raise a database exception.
"""
raise self.exceptionClass(self.exceptionMessage)
@abstractmethod
async def storeEvent(self, event: Event) -> None:
"""
Store the given event in the test store.
"""
@abstractmethod
async def storeIncident(self, incident: Incident) -> None:
"""
Store the given incident in the test store.
"""
@abstractmethod
async def storeIncidentReport(self, incidentReport: IncidentReport) -> None:
"""
Store the given incident report in the test store.
"""
@abstractmethod
async def storeConcentricStreet(
self,
event: Event,
streetID: str,
streetName: str,
ignoreDuplicates: bool = False,
) -> None:
"""
Store a street in the given event with the given ID and name in the
test store.
"""
@abstractmethod
async def storeIncidentType(self, name: str, hidden: bool) -> None:
"""
Store an incident type with the given name and hidden state in the
test store.
"""
def dateTimesEqual(self, a: DateTime, b: DateTime) -> bool:
"""
Compare two :class:`DateTime` objects.
Apply some "close enough" logic to deal with the possibility that
date-times stored in a database may be slightly off when retrieved.
"""
# Floats stored may be slightly off when round-tripped.
return abs(a - b) < TimeDelta(microseconds=20)
def reportEntriesEqual(
self,
reportEntriesA: Sequence[ReportEntry],
reportEntriesB: Sequence[ReportEntry],
ignoreAutomatic: bool = False,
) -> bool:
"""
Compare two sequences of :class:`ReportEntry` objects, using
:meth:`dateTimesEqual` when comparing date-times.
"""
if ignoreAutomatic:
reportEntriesA = tuple(e for e in reportEntriesA if not e.automatic)
if len(reportEntriesA) != len(reportEntriesB):
return False
for entryA, entryB in zip(reportEntriesA, reportEntriesB):
if entryA != entryB:
if entryA.author != entryB.author:
return False
if entryA.automatic != entryB.automatic:
return False
if entryA.text != entryB.text:
return False
if not self.dateTimesEqual(entryA.created, entryB.created):
return False
return True
@staticmethod
def normalizeIncidentAddress(incident: Incident) -> Incident:
"""
Normalize the address in an incident to a canonical form, if necessary.
"""
return incident
@attrs(frozen=True, auto_attribs=True, kw_only=True)
class TestDataStoreABC(IMSDataStore, TestDataStoreMixIn):
"""
Test Data Store ABC.
"""
class DataStoreTests(AsynchronousTestCase):
"""
Tests for :class:`IMSDataStore` event access.
"""
skip: ClassVar[Optional[str]] = "Parent class of real tests"
async def store(self) -> TestDataStoreABC:
"""
Return a data store for use in tests.
"""
raise NotImplementedError("Subclass should implement store()")
|
# expected: fail
"""Tests for distutils.
The tests for distutils are defined in the distutils.tests package;
the test_suite() function there returns a test suite that's ready to
be run.
"""
from test import test_support
import distutils.tests
def test_main():
test_support.run_unittest(distutils.tests.test_suite())
test_support.reap_children()
if __name__ == "__main__":
test_main()
|
#!/usr/bin/env python3.7
import sys
from os.path import dirname, join, abspath
from CeTests import *
test = greaterThan()
test.run()
test = greaterThanOrEqual()
test.run()
test = lessThanOrEqual()
test.run()
test = lessThan()
test.run()
test = equal()
test.run()
test = notEqual()
test.run()
test = between()
test.run()
test = inList()
test.run()
test = attribute_exists()
test.run()
test = attribute_not_exists()
test.run()
test = attribute_type()
test.run()
test = begins_with()
test.run()
test = size()
test.run()
test = Not()
test.run()
test = And()
test.run()
test = Or()
test.run()
test = precedence()
test.run()
|
############################################################################
# //KEYSTROKE// #
# //By Arvindcheenu.// #
# //2014-15.All rights reserved.// #
############################################################################
import sys,time,random
#Human Typing Simulator:
def print_slow(str):
for letter in str:
sys.stdout.write(letter)
sys.stdout.flush()
time.sleep(0.02)
#Letter Counter:
def count_letters(word):
space = " "
return len([letter for letter in word if letter not in space])
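# e.g. count_letters("Every keystroke is saving lives") == 27: spaces are
# ignored, so only characters actually typed add to the lives-saved score.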
# Conversation Dictionary
convo ={"Type this sentence to fight back":"Type this sentence to fight back",
"Great, now keep typing":"Great, now keep typing",
"Type faster to deactivate the bomb":"Type faster to deactivate the bomb",
"The city is being evacuated now":"The city is being evacuated now",
"Every keystroke is saving lives":"Every keystroke is saving lives",
"Oh we have zome kommunication ichues":"Oh we have zome kommunication ichues",
"Someteng is wronng with the segneal":"Someteng is wronng with the segneal",
"Can yu styll haer ush?":"Can yu styll haer ush?",
"Ze boemb iz cosing interrrfearreeences":"Ze boemb iz cosing interrrfearreeences",
"I thank msoe hackres aer dinog tihs!":"I thank msoe hackres aer dinog tihs",
"Cam yu ear mey cleerley?! Whet?":"Cam yu ear mey cleerley?! Whet?",
"Amaznigly skidlled figners!! I sey!":"Amaznigly skidlled figners!! I sey!",
"What on erth iz habbenning?!":"What on erth iz habbenning?!",
"The Gevornmant planz to meind-conterol people!!":"The Gevornmant planz to meind-conterol people!!",
"The Illerminati is Everywhere!! Look back!!":"The Illerminati is Everywhere!! Look back!!",
"Routing encrypted data from defense satellite...":"Routing encrypted data from defense satellite..."}
print("======================================================================================================")
print_slow( "//KEYSTROKE//")
print("")
print("======================================================================================================")
print ("Press 'I' for instructions")
print ("Press 'D' to choose difficulty")
print ("Press 'Q' to quit")
print("======================================================================================================")
opt=input("> ")
while opt!="Q":
if opt=="D" or opt=="d":
print("======================================================================================================")
print ("Choose Difficulty:")
print ("1. Novice (NOV)")
print ("2. Rookie (ROK)")
print ("3. Professional (PRO)")
print("======================================================================================================")
dif=input("Enter option code: ")
print("")
if dif=="NOV" or dif=="nov":
print ("Your difficulty has been set.")
print("")
play=input( "PRESS 'P' TO PLAY============================================================================ ")
if play=="P" or play=="p":
print("")
print("Get ready...")
time.sleep(1)
print("Set...")
time.sleep(1)
print("TYPE!..")
time.sleep(1)
print("")
lifesave_nov=30
lifesaved=0  # initialise so the time-out report can print before the first sentence is completed
missile_time_nov= 90.0
start_time = time.time()
convo_len=int(len(convo))
qus=(random.choice( list(convo.keys())))
print (qus)
ans=input("> ")
while qus!=ans:
print ("try again..")
ans=input("> ")
fin_time = time.time() - start_time
fin_time = round(fin_time,2)
end_time = missile_time_nov - fin_time
end_time = int(round(end_time,2))
if end_time<=0:
print ("Resistance is futile. City is destroyed.")
print ("The time taken is", fin_time,"seconds.")
print ("Time left for missile to explode is ",end_time, "seconds.")
print ("Over",lifesaved," lives saved.")
print ("You are a true hero.")
break
while qus==ans:
qus=(random.choice( list(convo.keys())))
print (qus)
ans=input("> ")
lifesaved=((count_letters(ans))*lifesave_nov)
fin_time = time.time() - start_time
fin_time = round(fin_time,2)
end_time = missile_time_nov - fin_time
end_time = int(round(end_time,2))
if end_time<=0:
print ("Resistance is futile. City is destroyed.")
print ("The time taken is", fin_time,"seconds.")
print ("Time left for missile to explode is ",end_time,"seconds.")
print ("Over",lifesaved,"lives saved.")
print ("You are a true hero.")
break
break
elif dif=="ROK" or dif=="rok":
print ("Your difficulty has been set.")
print("")
play=input( "PRESS 'P' TO PLAY============================================================================ ")
if play=="P" or play=="p":
print("")
print("Get ready...")
time.sleep(1)
print("Set...")
time.sleep(1)
print("TYPE!..")
time.sleep(1)
print("")
lifesave_rok=10
lifesaved=0  # initialise so the time-out report can print before the first sentence is completed
missile_time_rok= 60.0
start_time = time.time()
convo_len=int(len(convo))
qus=(random.choice( list(convo.keys())))
print (qus)
ans=input("> ")
while qus!=ans:
print ("try again..")
ans=input("> ")
fin_time = time.time() - start_time
fin_time = round(fin_time,2)
end_time = missile_time_rok - fin_time
end_time = int(round(end_time,2))
if end_time<=0:
print ("Resistance is futile. City is destroyed.")
print ("The time taken is", fin_time,"seconds.")
print ("Time left for missile to explode is ",end_time, "seconds.")
print ("Over",lifesaved," lives saved.")
print ("You are a true hero.")
break
while qus==ans:
qus=(random.choice( list(convo.keys())))
print (qus)
ans=input("> ")
lifesaved=((count_letters(ans))*lifesave_rok)
fin_time = time.time() - start_time
fin_time = round(fin_time,2)
end_time = missile_time_rok - fin_time
end_time = int(round(end_time,2))
if end_time<=0:
print ("Resistance is futile. City is destroyed.")
print ("The time taken is", fin_time,"seconds.")
print ("Time left for missile to explode is ",end_time,"seconds.")
print ("Over",lifesaved,"lives saved.")
print ("You are a true hero.")
break
break
else:
print ("Your difficulty has been set.")
print("")
play=input( "PRESS 'P' TO PLAY============================================================================ ")
if play=="P" or play=="p":
print("")
print("Get ready...")
time.sleep(1)
print("Set...")
time.sleep(1)
print("TYPE!..")
time.sleep(1)
print("")
lifesave_pro=5
lifesaved=0  # initialise so the time-out report can print before the first sentence is completed
missile_time_pro= 30.0
start_time = time.time()
convo_len=int(len(convo))
qus=(random.choice( list(convo.keys())))
print (qus)
ans=input("> ")
while qus!=ans:
print ("try again..")
ans=input("> ")
fin_time = time.time() - start_time
fin_time = round(fin_time,2)
end_time = missile_time_pro - fin_time
end_time = int(round(end_time,2))
if end_time<=0:
print ("Resistance is futile. City is destroyed.")
print ("The time taken is", fin_time,"seconds.")
print ("Time left for missile to explode is ",end_time, "seconds.")
print ("Over",lifesaved," lives saved.")
print ("You are a true hero.")
break
while qus==ans:
qus=(random.choice( list(convo.keys())))
print (qus)
ans=input("> ")
lifesaved=((count_letters(ans))*lifesave_pro)
fin_time = time.time() - start_time
fin_time = round(fin_time,2)
end_time = missile_time_pro - fin_time
end_time = int(round(end_time,2))
if end_time<=0:
print ("Resistance is futile. City is destroyed.")
print ("The time taken is", fin_time,"seconds.")
print ("Time left for missile to explode is ",end_time,"seconds.")
print ("Over",lifesaved,"lives saved.")
print ("You are a true hero.")
break
break
else:
print("--------------------------------------------------//INSTRUCTIONS//----------------------------------------------------")
print("")
print("THE POWER TO SAVE LIVES WITH THE PRESS OF A KEY.")
print("")
print("The basic rule for the game is to save maximum number of lives possible by typing commands you get in the questions.")
print("Important note: The answers are case sensitive. It tests your accuracy in typing.")
print("The number of lives saved and the time of strike of the missile will vary by difficulty of 3 levels.")
print("Toughest levels will have the least time and the least number of lives saved.")
print("Though its impossible to save all, reducing casualities must be your priority.")
print("")
print("BE A HERO, TYPOMANDO.")
print("")
print("Press D to choose difficulty")
print("---------------------------------------------------------------------------------------------------------------------------------")
opt=input("> ")
if opt=="Q" or opt=="q":
print ("Game is quitting...")
time.sleep(1)
quit()
input()
|
import pytest
import numpy as np
from flare.struc import Structure
from flare.env import AtomicEnvironment
def test_species_count():
cell = np.eye(3)
species = [1, 2, 3]
positions = np.array([[0, 0, 0], [0.5, 0.5, 0.5], [0.1, 0.1, 0.1]])
struc_test = Structure(cell, species, positions)
env_test = AtomicEnvironment(structure= struc_test,
atom = 0,
cutoffs = np.array([1,1]))
assert(len(struc_test.positions) == len(struc_test.coded_species))
assert(len(env_test.bond_array_2) == len(env_test.etypes))
assert(isinstance(env_test.etypes[0], np.int8))
def test_env_methods():
cell = np.eye(3)
species = [1, 2, 3]
positions = np.array([[0, 0, 0], [0.5, 0.5, 0.5], [0.1, 0.1, 0.1]])
struc_test = Structure(cell, species, positions)
env_test = AtomicEnvironment(struc_test, 0, np.array([1, 1]))
assert str(env_test) == 'Atomic Env. of Type 1 surrounded by 12 atoms' \
' of Types [2, 3]'
the_dict = env_test.to_dict()
assert isinstance(the_dict, dict)
for key in ['positions','cell','atom','cutoff_2','cutoff_3','species']:
assert key in the_dict.keys()
remade_env = AtomicEnvironment.from_dict(the_dict)
assert isinstance(remade_env,AtomicEnvironment)
assert np.array_equal(remade_env.bond_array_2, env_test.bond_array_2)
assert np.array_equal(remade_env.bond_array_3, env_test.bond_array_3)
|
from flask import url_for
from meowbot.triggers import SimpleResponseCommand
from meowbot.conditions import IsCommand
from meowbot.context import CommandContext
class Homepage(SimpleResponseCommand):
condition = IsCommand(["homepage", "home"])
help = "`homepage`: link to Meowbot homepage"
def get_message_args(self, context: CommandContext):
return {"text": url_for("main.index", _external=True)}
class GitHub(SimpleResponseCommand):
condition = IsCommand(["github", "git", "source"])
help = "`github`: GitHub page for Meowbot"
def get_message_args(self, context: CommandContext):
return {"text": "https://github.com/pbhuss/meowbot"}
|