from __future__ import annotations import copy from typing import List, Union from machine_common_sense.config_manager import Vector3d from .base_objects import ( create_soccer_ball, create_variable_definition_from_base, ) from .definitions import ( ChosenMaterial, DefinitionDataset, ObjectDefinition, get_dataset, ) def multiply(one: Vector3d, two: Vector3d) -> Vector3d: return Vector3d(one.x * two.x, one.y * two.y, one.z * two.z) _BALL_PLASTIC = create_variable_definition_from_base( type='ball', size_multiplier_list=[0.05, 0.1, 0.25, 0.5], chosen_material_list=[ChosenMaterial.PLASTIC_HOLLOW, ChosenMaterial.RUBBER] ) _BALL_NON_PLASTIC = create_variable_definition_from_base( type='ball', size_multiplier_list=[0.025, 0.05, 0.1, 0.25], chosen_material_list=[ ChosenMaterial.BLOCK_WOOD, ChosenMaterial.METAL, ChosenMaterial.WOOD ] ) _BLOCK_BLANK_CUBE = create_variable_definition_from_base( type='block_blank_wood_cube', size_multiplier_list=[1, Vector3d(1, 2, 1), Vector3d(2, 1, 2)], chosen_material_list=[ChosenMaterial.BLOCK_WOOD, ChosenMaterial.WOOD] ) _BLOCK_BLANK_CYLINDER = create_variable_definition_from_base( type='block_blank_wood_cylinder', size_multiplier_list=[1, Vector3d(1, 2, 1), Vector3d(2, 1, 2)], chosen_material_list=[ChosenMaterial.BLOCK_WOOD, ChosenMaterial.WOOD] ) _BLOCK_LETTER = create_variable_definition_from_base( # Note: please ignore the "blue letter c" in the type: the object's # chosen material will change this design. type='block_blue_letter_c', size_multiplier_list=[1] ) _BLOCK_NUMBER = create_variable_definition_from_base( # Note: please ignore the "yellow number 1" in the type: the object's # chosen material will change this design. type='block_yellow_number_1', size_multiplier_list=[1] ) _DUCK_ON_WHEELS = create_variable_definition_from_base( type='duck_on_wheels', size_multiplier_list=[0.5, 1, 2], chosen_material_list=[ChosenMaterial.BLOCK_WOOD, ChosenMaterial.WOOD] ) _TOY_RACECAR = create_variable_definition_from_base( type='racecar_red', size_multiplier_list=[0.75, 1.5, 3], chosen_material_list=[ChosenMaterial.BLOCK_WOOD, ChosenMaterial.WOOD] ) _PACIFIER = create_variable_definition_from_base( type='pacifier', size_multiplier_list=[1] ) _CRAYON = ObjectDefinition( chooseTypeList=[ create_variable_definition_from_base( type='crayon_black', size_multiplier_list=[1] ), create_variable_definition_from_base( type='crayon_blue', size_multiplier_list=[1] ), create_variable_definition_from_base( type='crayon_green', size_multiplier_list=[1] ), create_variable_definition_from_base( type='crayon_pink', size_multiplier_list=[1] ), create_variable_definition_from_base( type='crayon_red', size_multiplier_list=[1] ), create_variable_definition_from_base( type='crayon_yellow', size_multiplier_list=[1] ) ] ) _TURTLE_ON_WHEELS = create_variable_definition_from_base( type='turtle_on_wheels', size_multiplier_list=[0.5, 1, 2], chosen_material_list=[ChosenMaterial.BLOCK_WOOD, ChosenMaterial.WOOD] ) _TOY_SEDAN = create_variable_definition_from_base( type='car_1', size_multiplier_list=[0.75, 1.5, 3], chosen_material_list=[ChosenMaterial.BLOCK_WOOD, ChosenMaterial.WOOD] ) _APPLE = ObjectDefinition( chooseTypeList=[ create_variable_definition_from_base( type='apple_1', size_multiplier_list=[1] ), create_variable_definition_from_base( type='apple_2', size_multiplier_list=[1] ) ] ) _BOWL = ObjectDefinition( chooseTypeList=[ create_variable_definition_from_base( type='bowl_3', size_multiplier_list=[1] ), create_variable_definition_from_base( type='bowl_4', size_multiplier_list=[1] ), 
create_variable_definition_from_base( type='bowl_6', size_multiplier_list=[1] ) ], chooseMaterialList=[ ChosenMaterial.PLASTIC.copy(), ChosenMaterial.WOOD.copy() ] ) _CUP = ObjectDefinition( chooseTypeList=[ create_variable_definition_from_base( type='cup_2', size_multiplier_list=[1] ), create_variable_definition_from_base( type='cup_3', size_multiplier_list=[1] ), create_variable_definition_from_base( type='cup_6', size_multiplier_list=[1] ) ], chooseMaterialList=[ ChosenMaterial.PLASTIC.copy(), ChosenMaterial.WOOD.copy() ] ) _PLATE = ObjectDefinition( chooseTypeList=[ create_variable_definition_from_base( type='plate_1', size_multiplier_list=[1] ), create_variable_definition_from_base( type='plate_3', size_multiplier_list=[1] ), create_variable_definition_from_base( type='plate_4', size_multiplier_list=[1] ) ], chooseMaterialList=[ ChosenMaterial.PLASTIC.copy(), ChosenMaterial.WOOD.copy() ] ) _BOOKCASE = ObjectDefinition( chooseTypeList=[ create_variable_definition_from_base( type='bookcase_1_shelf', size_multiplier_list=[1], ), create_variable_definition_from_base( type='bookcase_1_shelf', size_multiplier_list=[Vector3d(0.5, 1, 1)] ), create_variable_definition_from_base( type='bookcase_1_shelf', size_multiplier_list=[Vector3d(2, 1, 1)] ), create_variable_definition_from_base( type='bookcase_2_shelf', size_multiplier_list=[1] ), create_variable_definition_from_base( type='bookcase_2_shelf', size_multiplier_list=[Vector3d(0.5, 1, 1)] ), create_variable_definition_from_base( type='bookcase_2_shelf', size_multiplier_list=[Vector3d(2, 1, 1)] ), create_variable_definition_from_base( type='bookcase_3_shelf', size_multiplier_list=[1] ), create_variable_definition_from_base( type='bookcase_3_shelf', size_multiplier_list=[Vector3d(0.5, 1, 1)] ), create_variable_definition_from_base( type='bookcase_3_shelf', size_multiplier_list=[Vector3d(2, 1, 1)] ), create_variable_definition_from_base( type='bookcase_4_shelf', size_multiplier_list=[1] ), create_variable_definition_from_base( type='bookcase_4_shelf', size_multiplier_list=[Vector3d(0.5, 1, 1)] ), create_variable_definition_from_base( type='bookcase_4_shelf', size_multiplier_list=[Vector3d(2, 1, 1)] ) ], chooseMaterialList=[ ChosenMaterial.METAL.copy(), ChosenMaterial.PLASTIC.copy(), ChosenMaterial.WOOD.copy() ] ) _BOOKCASE_SIDELESS = ObjectDefinition( chooseTypeList=[ create_variable_definition_from_base( type='bookcase_1_shelf_sideless', size_multiplier_list=[1], ), create_variable_definition_from_base( type='bookcase_1_shelf_sideless', size_multiplier_list=[Vector3d(0.5, 1, 1)] ), create_variable_definition_from_base( type='bookcase_1_shelf_sideless', size_multiplier_list=[Vector3d(2, 1, 1)] ), create_variable_definition_from_base( type='bookcase_2_shelf_sideless', size_multiplier_list=[1] ), create_variable_definition_from_base( type='bookcase_2_shelf_sideless', size_multiplier_list=[Vector3d(0.5, 1, 1)] ), create_variable_definition_from_base( type='bookcase_2_shelf_sideless', size_multiplier_list=[Vector3d(2, 1, 1)] ), create_variable_definition_from_base( type='bookcase_3_shelf_sideless', size_multiplier_list=[1] ), create_variable_definition_from_base( type='bookcase_3_shelf_sideless', size_multiplier_list=[Vector3d(0.5, 1, 1)] ), create_variable_definition_from_base( type='bookcase_3_shelf_sideless', size_multiplier_list=[Vector3d(2, 1, 1)] ), create_variable_definition_from_base( type='bookcase_4_shelf_sideless', size_multiplier_list=[1] ), create_variable_definition_from_base( type='bookcase_4_shelf_sideless', 
size_multiplier_list=[Vector3d(0.5, 1, 1)] ), create_variable_definition_from_base( type='bookcase_4_shelf_sideless', size_multiplier_list=[Vector3d(2, 1, 1)] ) ], chooseMaterialList=[ ChosenMaterial.METAL.copy(), ChosenMaterial.PLASTIC.copy(), ChosenMaterial.WOOD.copy() ] ) _CART = create_variable_definition_from_base( type='cart_1', size_multiplier_list=[0.5, 1], chosen_material_list=[ChosenMaterial.METAL] ) _CHAIR_1_BABY_SCALED = create_variable_definition_from_base( type='chair_1', attributes_overrides=['pickupable', 'receptacle'], size_multiplier_list=[0.333, 0.5, 0.667], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _CHAIR_1 = create_variable_definition_from_base( type='chair_1', size_multiplier_list=[0.75, 1, 1.25], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _CHAIR_2_STOOL_CIRCLE_BABY_SCALED = create_variable_definition_from_base( type='chair_2', attributes_overrides=['pickupable', 'receptacle'], size_multiplier_list=[ Vector3d(0.25, 0.5, 0.25), Vector3d(0.5, 0.5, 0.5), Vector3d(0.75, 0.5, 0.75) ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _CHAIR_2_STOOL_CIRCLE = create_variable_definition_from_base( type='chair_2', size_multiplier_list=[ Vector3d(0.75, 0.75, 0.75), Vector3d(1, 0.75, 1), Vector3d(1, 1, 1) ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _CHAIR_3_STOOL_RECT = create_variable_definition_from_base( type='chair_3', size_multiplier_list=[ Vector3d(0.5, 0.5, 0.5), Vector3d(0.667, 0.667, 0.667), Vector3d(0.75, 0.75, 0.75) ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _CHAIR_4_OFFICE = create_variable_definition_from_base( type='chair_4', size_multiplier_list=[ Vector3d(0.7, 0.7, 0.7), Vector3d(0.9, 0.9, 0.9), Vector3d(1.1, 1.1, 1.1) ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.PLASTIC] ) _BLOCK_BLANK_CUBE_NOT_PICKUPABLE = create_variable_definition_from_base( type='block_blank_wood_cube', attributes_overrides=['moveable'], size_multiplier_list=[2, Vector3d(2, 4, 2), Vector3d(4, 2, 4)], chosen_material_list=[ChosenMaterial.BLOCK_WOOD, ChosenMaterial.WOOD] ) _BLOCK_BLANK_CYLINDER_NOT_PICKUPABLE = create_variable_definition_from_base( type='block_blank_wood_cylinder', attributes_overrides=['moveable'], size_multiplier_list=[2, Vector3d(2, 4, 2), Vector3d(4, 2, 4)], chosen_material_list=[ChosenMaterial.BLOCK_WOOD, ChosenMaterial.WOOD] ) # TODO Update this to use ObjectDefinition if needed in the future. # _CHANGING_TABLE = { # "type": "changing_table", # "shape": ["changing table"], # "size": "huge", # "mass": 50, # "materialCategory": ["wood"], # "salientMaterials": ["wood"], # "attributes": ["receptacle", "openable", "occluder"], # "enclosedAreas": [{ # # Remove the top drawer for now. # # "id": "_drawer_top", # # "position": { # # "x": 0.165, # # "y": 0.47, # # "z": -0.03 # # }, # # "dimensions": { # # "x": 0.68, # # "y": 0.22, # # "z": 0.41 # # } # # }, { # "id": "_drawer_bottom", # "position": { # "x": 0.175, # "y": 0.19, # "z": -0.03 # }, # "dimensions": { # "x": 0.68, # "y": 0.2, # "z": 0.41 # } # }], # "openAreas": [{ # # Remove the top shelves for now. 
# # "id": "", # # "position": { # # "x": 0, # # "y": 0.85, # # "z": 0 # # }, # # "dimensions": { # # "x": 1, # # "y": 0, # # "z": 0.55 # # } # # }, { # # "id": "_shelf_top", # # "position": { # # "x": 0, # # "y": 0.725, # # "z": -0.05 # # }, # # "dimensions": { # # "x": 1.05, # # "y": 0.2, # # "z": 0.44 # # } # # }, { # # "id": "_shelf_middle", # # "position": { # # "x": -0.365, # # "y": 0.475, # # "z": -0.05 # # }, # # "dimensions": { # # "x": 0.32, # # "y": 0.25, # # "z": 0.44 # # } # # }, { # "id": "_shelf_bottom", # "position": { # "x": -0.365, # "y": 0.2, # "z": -0.05 # }, # "dimensions": { # "x": 0.32, # "y": 0.25, # "z": 0.44 # } # }], # "dimensions": { # "x": 1.1, # "y": 0.96, # "z": 0.89 # }, # "offset": { # "x": 0, # "y": 0.48, # "z": 0.155 # }, # "closedDimensions": { # "x": 1.1, # "y": 0.96, # "z": 0.58 # }, # "closedOffset": { # "x": 0, # "y": 0.48, # "z": 0 # }, # "positionY": 0, # "scale": { # "x": 1, # "y": 1, # "z": 1 # } # } _CRIB = create_variable_definition_from_base( type='crib', size_multiplier_list=[1], chosen_material_list=[ChosenMaterial.WOOD] ) # Set the default X to 1.175 so the table's shape is an exact 1:2 ratio. _TABLE_1_RECT_VECTOR = Vector3d(1.175, 1, 1) _TABLE_1_RECT_BABY_SCALED = create_variable_definition_from_base( type='table_1', attributes_overrides=['pickupable', 'receptacle'], size_multiplier_list=[ multiply(_TABLE_1_RECT_VECTOR, Vector3d(0.5, 0.5, 0.5)), multiply(_TABLE_1_RECT_VECTOR, Vector3d(0.5, 0.333, 0.5)), multiply(_TABLE_1_RECT_VECTOR, Vector3d(0.5, 0.5, 0.25)), multiply(_TABLE_1_RECT_VECTOR, Vector3d(0.5, 0.333, 0.25)) ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) _TABLE_1_RECT_ACCESSIBLE = create_variable_definition_from_base( type='table_1', size_multiplier_list=[ multiply(_TABLE_1_RECT_VECTOR, Vector3d(1, 0.5, 0.5)), multiply(_TABLE_1_RECT_VECTOR, Vector3d(1, 0.5, 1)), multiply(_TABLE_1_RECT_VECTOR, Vector3d(2, 0.5, 1)), multiply(_TABLE_1_RECT_VECTOR, Vector3d(1, 1, 0.5)), multiply(_TABLE_1_RECT_VECTOR, Vector3d(1, 1, 1)), multiply(_TABLE_1_RECT_VECTOR, Vector3d(2, 1, 1)) ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) _TABLE_2_CIRCLE_BABY_SCALED = create_variable_definition_from_base( type='table_2', attributes_overrides=['pickupable', 'receptacle'], size_multiplier_list=[ Vector3d(0.333, 0.333, 0.333), Vector3d(0.5, 0.333, 0.5) ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _TABLE_2_CIRCLE_ACCESSIBLE = create_variable_definition_from_base( type='table_2', size_multiplier_list=[ Vector3d(0.5, 0.5, 0.5), Vector3d(1, 0.5, 1), Vector3d(0.75, 0.75, 0.75), Vector3d(1.5, 0.75, 1.5), Vector3d(1, 1, 1), Vector3d(2, 1, 2) ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _TABLE_3_CIRCLE_BABY_SCALED = create_variable_definition_from_base( type='table_3', attributes_overrides=['pickupable', 'receptacle'], size_multiplier_list=[Vector3d(0.5, 0.5, 0.5), Vector3d(1, 0.5, 1)], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) _TABLE_3_CIRCLE_ACCESSIBLE = create_variable_definition_from_base( type='table_3', size_multiplier_list=[ Vector3d(1.5, 0.5, 1.5), Vector3d(2, 0.5, 2), Vector3d(1, 1, 1), Vector3d(1.5, 1, 1.5), Vector3d(2, 1, 2), Vector3d(2.5, 1, 2.5) ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) _TABLE_4_SEMICIRCLE_ACCESSIBLE = create_variable_definition_from_base( type='table_4', size_multiplier_list=[ Vector3d(0.75, 0.75, 0.75), Vector3d(1, 0.75, 1), Vector3d(1.25, 
0.75, 1.25), Vector3d(1.5, 0.75, 1.5), Vector3d(1, 1, 1), Vector3d(1.25, 1, 1.25), Vector3d(1.5, 1, 1.5), Vector3d(2, 1, 2) ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) # Set the default Z to 0.667 so the table's shape is an exact 2:1 ratio. _TABLE_5_RECT_VECTOR = Vector3d(1, 1, 0.667) _TABLE_5_RECT_ACCESSIBLE = create_variable_definition_from_base( type='table_5', size_multiplier_list=[ multiply(_TABLE_5_RECT_VECTOR, Vector3d(0.25, 0.5, 0.5)), multiply(_TABLE_5_RECT_VECTOR, Vector3d(0.5, 0.5, 0.5)), multiply(_TABLE_5_RECT_VECTOR, Vector3d(0.5, 0.5, 1)), multiply(_TABLE_5_RECT_VECTOR, Vector3d(1, 0.5, 1)), multiply(_TABLE_5_RECT_VECTOR, Vector3d(0.25, 1, 0.5)), multiply(_TABLE_5_RECT_VECTOR, Vector3d(0.5, 1, 0.5)), multiply(_TABLE_5_RECT_VECTOR, Vector3d(0.5, 1, 1)), multiply(_TABLE_5_RECT_VECTOR, Vector3d(1, 1, 1)) ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) # Set the default X and Z so the table's shape is an exact 2:1 ratio. _TABLE_7_RECT_VECTOR = Vector3d(0.98, 1, 0.769) _TABLE_7_RECT_ACCESSIBLE = create_variable_definition_from_base( type='table_7', size_multiplier_list=[ multiply(_TABLE_7_RECT_VECTOR, Vector3d(0.5, 1, 1)), multiply(_TABLE_7_RECT_VECTOR, Vector3d(1, 1, 1)), multiply(_TABLE_7_RECT_VECTOR, Vector3d(1, 1, 2)), multiply(_TABLE_7_RECT_VECTOR, Vector3d(2, 1, 2)), multiply(_TABLE_7_RECT_VECTOR, Vector3d(0.5, 2, 1)), multiply(_TABLE_7_RECT_VECTOR, Vector3d(1, 2, 1)), multiply(_TABLE_7_RECT_VECTOR, Vector3d(1, 2, 2)), multiply(_TABLE_7_RECT_VECTOR, Vector3d(2, 2, 2)) ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) # Set the default X and Z so the table's shape is an exact 1:2 ratio. _TABLE_8_RECT_VECTOR = Vector3d(0.769, 1, 0.98) _TABLE_8_RECT_ACCESSIBLE = create_variable_definition_from_base( type='table_8', size_multiplier_list=[ multiply(_TABLE_8_RECT_VECTOR, Vector3d(1, 1, 0.5)), multiply(_TABLE_8_RECT_VECTOR, Vector3d(1, 1, 1)), multiply(_TABLE_8_RECT_VECTOR, Vector3d(2, 1, 1)), multiply(_TABLE_8_RECT_VECTOR, Vector3d(2, 1, 2)), multiply(_TABLE_8_RECT_VECTOR, Vector3d(1, 2, 0.5)), multiply(_TABLE_8_RECT_VECTOR, Vector3d(1, 2, 1)), multiply(_TABLE_8_RECT_VECTOR, Vector3d(2, 2, 1)), multiply(_TABLE_8_RECT_VECTOR, Vector3d(2, 2, 2)) ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) _TABLE_11_T_LEGS = create_variable_definition_from_base( type='table_11', size_multiplier_list=[ Vector3d(0.5, 0.5, 0.5), Vector3d(0.5, 1, 0.5), Vector3d(1, 0.5, 1), Vector3d(1, 1, 1), ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _TABLE_12_X_LEGS = create_variable_definition_from_base( type='table_12', size_multiplier_list=[ Vector3d(0.5, 0.5, 0.5), Vector3d(0.5, 1, 0.5), Vector3d(1, 0.5, 1), Vector3d(1, 1, 1), ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _TV = create_variable_definition_from_base( type='tv_2', size_multiplier_list=[0.5, 1, 1.5, 2] ) _SHELF_2_TABLE_SQUARE = create_variable_definition_from_base( type='shelf_2', size_multiplier_list=[Vector3d(0.5, 1, 0.5), Vector3d(1, 1, 1)], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) _SHELF_2_TABLE_RECT = create_variable_definition_from_base( type='shelf_2', size_multiplier_list=[ Vector3d(1, 2, 0.5), Vector3d(2, 2, 0.5), Vector3d(2, 3, 0.5), Vector3d(3, 3, 0.5) ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) _SHELF_1_CUBBY_BABY_SCALED = create_variable_definition_from_base( 
type='shelf_1', attributes_overrides=['pickupable', 'receptacle'], size_multiplier_list=[Vector3d(0.5, 0.5, 0.5), Vector3d(0.75, 0.75, 0.75)], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) _SHELF_1_CUBBY = create_variable_definition_from_base( type='shelf_1', size_multiplier_list=[ Vector3d(1, 1, 1), Vector3d(1.5, 1.5, 1), Vector3d(1.5, 1.5, 1.5), Vector3d(2, 2, 1), Vector3d(2, 2, 1.5), Vector3d(2, 2, 2) ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.WOOD] ) _SOFA_BABY_SCALED = ObjectDefinition( chooseTypeList=[ create_variable_definition_from_base( type='sofa_1', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.333] ), create_variable_definition_from_base( type='sofa_1', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.5] ), create_variable_definition_from_base( type='sofa_2', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.333] ), create_variable_definition_from_base( type='sofa_2', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.5] ), create_variable_definition_from_base( type='sofa_3', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.333] ), create_variable_definition_from_base( type='sofa_3', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.5] ) ] ) _SOFA_1 = create_variable_definition_from_base( type='sofa_1', size_multiplier_list=[Vector3d(0.75, 1, 1), 1, Vector3d(1.25, 1, 1)] ) _SOFA_2 = create_variable_definition_from_base( type='sofa_2', size_multiplier_list=[Vector3d(0.75, 1, 1), 1, Vector3d(1.25, 1, 1)] ) _SOFA_3 = create_variable_definition_from_base( type='sofa_3', size_multiplier_list=[Vector3d(0.75, 1, 1), 1, Vector3d(1.25, 1, 1)] ) _SOFA_CHAIR_BABY_SCALED = ObjectDefinition( chooseTypeList=[ create_variable_definition_from_base( type='sofa_chair_1', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.333] ), create_variable_definition_from_base( type='sofa_chair_1', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.5] ), create_variable_definition_from_base( type='sofa_chair_2', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.333] ), create_variable_definition_from_base( type='sofa_chair_2', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.5] ), create_variable_definition_from_base( type='sofa_chair_3', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.333] ), create_variable_definition_from_base( type='sofa_chair_3', attributes_overrides=['moveable', 'receptacle'], size_multiplier_list=[0.5] ) ] ) _SOFA_CHAIR_1 = create_variable_definition_from_base( type='sofa_chair_1', size_multiplier_list=[1] ) _SOFA_CHAIR_2 = create_variable_definition_from_base( type='sofa_chair_2', size_multiplier_list=[1] ) _SOFA_CHAIR_3 = create_variable_definition_from_base( type='sofa_chair_3', size_multiplier_list=[1] ) # TODO Update this to use ObjectDefinition if needed in the future. # _WARDROBE = { # "type": "wardrobe", # "shape": ["wardrobe"], # "size": "huge", # "mass": 50, # "materialCategory": ["wood"], # "salientMaterials": ["wood"], # "attributes": ["receptacle", "openable", "occluder"], # "enclosedAreas": [{ # # Remove the top drawers and shelves for now. 
# # "id": "_middle_shelf_right", # # "position": { # # "x": 0.255, # # "y": 1.165, # # "z": 0.005 # # }, # # "dimensions": { # # "x": 0.49, # # "y": 1.24, # # "z": 0.46 # # } # # }, { # # "id": "_middle_shelf_left", # # "position": { # # "x": -0.255, # # "y": 1.295, # # "z": 0.005 # # }, # # "dimensions": { # # "x": 0.49, # # "y": 0.98, # # "z": 0.46 # # } # # }, { # # "id": "_bottom_shelf_left", # # "position": { # # "x": -0.255, # # "y": 0.665, # # "z": 0.005 # # }, # # "dimensions": { # # "x": 0.49, # # "y": 0.24, # # "z": 0.46 # # } # # }, { # # "id": "_lower_drawer_top_left", # # "position": { # # "x": -0.265, # # "y": 0.42, # # "z": 0.015 # # }, # # "dimensions": { # # "x": 0.445, # # "y": 0.16 # # "z": 0.425 # # } # # }, { # # "id": "_lower_drawer_top_right", # # "position": { # # "x": 0.265, # # "y": 0.42, # # "z": 0.015 # # }, # # "dimensions": { # # "x": 0.445, # # "y": 0.16 # # "z": 0.425 # # } # # }, { # "id": "_lower_drawer_bottom_left", # "position": { # "x": -0.265, # "y": 0.21, # "z": 0.015 # }, # "dimensions": { # "x": 0.445, # "y": 0.16, # "z": 0.425 # } # }, { # "id": "_lower_drawer_bottom_right", # "position": { # "x": 0.265, # "y": 0.21, # "z": 0.015 # }, # "dimensions": { # "x": 0.445, # "y": 0.16, # "z": 0.425 # } # }], # "dimensions": { # "x": 1.07, # "y": 2.1, # "z": 1 # }, # "offset": { # "x": 0, # "y": 1.05, # "z": 0.17 # }, # "closedDimensions": { # "x": 1.07, # "y": 2.1, # "z": 0.49 # }, # "closedOffset": { # "x": 0, # "y": 1.05, # "z": 0 # }, # "positionY": 0, # "scale": { # "x": 1, # "y": 1, # "z": 1 # } # } _CASE_1_SUITCASE = create_variable_definition_from_base( type='case_1', size_multiplier_list=[ # Too little to fit a soccer ball inside 1, 1.25, 1.5, # Big enough to fit a soccer ball inside 2, 2.25, 2.5 ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.PLASTIC] ) _CASE_3 = create_variable_definition_from_base( type='case_3', size_multiplier_list=[ # Too little to fit a soccer ball inside 1, 1.25, 1.5, # Big enough to fit a soccer ball inside 2, 2.25, 2.5 ], chosen_material_list=[ChosenMaterial.METAL, ChosenMaterial.PLASTIC] ) _CHEST_1_CUBOID = create_variable_definition_from_base( type='chest_1', size_multiplier_list=[ # Too little to fit a soccer ball inside 0.3, 0.5, # Big enough to fit a soccer ball inside 0.7, 0.9, 1.1, 1.3 ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _CHEST_2_SEMICYLINDER = create_variable_definition_from_base( type='chest_2', size_multiplier_list=[ # Too little to fit a soccer ball inside 0.5, 0.75, # Big enough to fit a soccer ball inside 1.25, 1.5, 1.75, 2 ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _CHEST_3_CUBOID = create_variable_definition_from_base( type='chest_3', size_multiplier_list=[ # Too little to fit a soccer ball inside 0.8, 1.2, # Big enough to fit a soccer ball inside 1.6, 2, 2.4 ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) _CHEST_8_SEMICYLINDER = create_variable_definition_from_base( type='chest_8', size_multiplier_list=[ # Too little to fit a soccer ball inside 0.8, 1.2, # Big enough to fit a soccer ball inside 1.8, 2.4, 3 ], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) # TODO Update this to use ObjectDefinition if needed in the future. 
# _POTTED_PLANT_LARGE = { # "shape": ["potted plant"], # "size": "large", # "mass": 5, # "materialCategory": [], # "salientMaterials": ["organic", "ceramic"], # "attributes": [], # "chooseType": [{ # "type": "plant_1", # "color": ["green", "brown"], # "dimensions": { # "x": 0.931 * 2, # "y": 0.807 * 2, # "z": 0.894 # }, # "offset": { # "x": -0.114 * 2, # "y": 0.399 * 2, # "z": -0.118 # }, # "positionY": 0, # "scale": { # "x": 2, # "y": 2, # "z": 2 # } # }, { # "type": "plant_5", # "color": ["green", "grey", "brown"], # "dimensions": { # "x": 0.522 * 2, # "y": 0.656 * 2, # "z": 0.62 # }, # "offset": { # "x": -0.024 * 2, # "y": 0.32 * 2, # "z": -0.018 # }, # "positionY": 0, # "scale": { # "x": 2, # "y": 2, # "z": 2 # } # }, { # "type": "plant_7", # "color": ["green", "brown"], # "dimensions": { # "x": 0.72 * 2, # "y": 1.094 * 2, # "z": 0.755 # }, # "offset": { # "x": 0 * 2, # "y": 0.546 * 2, # "z": -0.017 # }, # "positionY": 0, # "scale": { # "x": 2, # "y": 2, # "z": 2 # } # }, { # "type": "plant_9", # "color": ["green", "grey", "brown"], # "dimensions": { # "x": 0.679 * 2, # "y": 0.859 * 2, # "z": 0.546 # }, # "offset": { # "x": 0.037 * 2, # "y": 0.41 * 2, # "z": 0 # }, # "positionY": 0, # "scale": { # "x": 2, # "y": 2, # "z": 2 # } # }, { # "type": "plant_14", # "color": ["red", "brown"], # "dimensions": { # "x": 0.508 * 2, # "y": 0.815 * 2, # "z": 0.623 # }, # "offset": { # "x": 0.036 * 2, # "y": 0.383 * 2, # "z": 0.033 # }, # "positionY": 0, # "scale": { # "x": 2, # "y": 2, # "z": 2 # } # }, { # "type": "plant_16", # "color": ["green", "brown"], # "dimensions": { # "x": 0.702 * 2, # "y": 1.278 * 2, # "z": 0.813 # }, # "offset": { # "x": -0.008 * 2, # "y": 0.629 * 2, # "z": -0.012 # }, # "positionY": 0, # "scale": { # "x": 2, # "y": 2, # "z": 2 # } # }] # } _PICKUPABLES = [ # Arbitrary division: balls [_BALL_PLASTIC, _BALL_NON_PLASTIC, create_soccer_ball()], # Arbitrary division: blocks [_BLOCK_BLANK_CUBE, _BLOCK_BLANK_CYLINDER, _BLOCK_LETTER, _BLOCK_NUMBER], # Arbitrary division: toys [ _TOY_SEDAN, _TOY_RACECAR, _DUCK_ON_WHEELS, _TURTLE_ON_WHEELS ], # Arbitrary division: misc objects [ _APPLE, _BOWL, _CUP, _PLATE, _CRAYON, _PACIFIER ], # Arbitrary division: baby furniture [ _CHAIR_1_BABY_SCALED, _CHAIR_2_STOOL_CIRCLE_BABY_SCALED, _TABLE_1_RECT_BABY_SCALED, _TABLE_3_CIRCLE_BABY_SCALED, _SHELF_1_CUBBY_BABY_SCALED ] ] _NOT_PICKUPABLES = [ # Arbitrary division: shelves [ _BOOKCASE, _BOOKCASE_SIDELESS, _SHELF_1_CUBBY, _SHELF_2_TABLE_SQUARE, _SHELF_2_TABLE_RECT ], # Arbitrary division: chairs [_CHAIR_1, _CHAIR_2_STOOL_CIRCLE, _CHAIR_3_STOOL_RECT, _CHAIR_4_OFFICE], # Arbitrary division: sofas [_SOFA_1, _SOFA_2, _SOFA_3, _SOFA_BABY_SCALED], # Arbitrary division: sofa chairs [_SOFA_CHAIR_1, _SOFA_CHAIR_2, _SOFA_CHAIR_3, _SOFA_CHAIR_BABY_SCALED], # Arbitrary division: rectangular obstacle tables [ _TABLE_1_RECT_ACCESSIBLE, _TABLE_7_RECT_ACCESSIBLE, _TABLE_8_RECT_ACCESSIBLE ], # Arbitrary division: (semi)circular obstacle tables [ _TABLE_2_CIRCLE_ACCESSIBLE, _TABLE_3_CIRCLE_ACCESSIBLE, _TABLE_4_SEMICIRCLE_ACCESSIBLE ], # Arbitrary division: occluder tables [_TABLE_5_RECT_ACCESSIBLE, _TABLE_11_T_LEGS, _TABLE_12_X_LEGS], # Arbitrary division: random objects [ _BLOCK_BLANK_CUBE_NOT_PICKUPABLE, _BLOCK_BLANK_CYLINDER_NOT_PICKUPABLE, _CART, _CRIB, _TV ] # Don't use containers here as possible occluders or context objects ] _CONTAINERS = [ [_CASE_1_SUITCASE], [_CHEST_1_CUBOID], [_CHEST_2_SEMICYLINDER], [_CASE_3], [_CHEST_3_CUBOID], [_CHEST_8_SEMICYLINDER] ] _CONTAINERS_OPEN_TOPPED = [[ # Each definition 
has multiple available sizes: the first is the smallest # size that can fit the soccer ball, and the rest are bigger sizes. create_variable_definition_from_base( type='bowl_3', size_multiplier_list=[Vector3d(3, 3, 3), Vector3d(3.5, 3.5, 3.5)], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ), create_variable_definition_from_base( type='bowl_4', size_multiplier_list=[Vector3d(2.5, 5, 2.5), Vector3d(3, 6, 3)], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ), create_variable_definition_from_base( type='bowl_6', size_multiplier_list=[Vector3d(3, 4, 3), Vector3d(3.5, 4.5, 3.5)], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ), create_variable_definition_from_base( type='cup_2', size_multiplier_list=[Vector3d(4, 2.5, 4), Vector3d(4.5, 3, 4.5)], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ), create_variable_definition_from_base( type='cup_3', size_multiplier_list=[Vector3d(4, 2.5, 4), Vector3d(4.5, 3, 4.5)], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ), create_variable_definition_from_base( type='cup_6', size_multiplier_list=[Vector3d(4, 3, 4), Vector3d(4, 3.5, 4)], chosen_material_list=[ ChosenMaterial.METAL, ChosenMaterial.PLASTIC, ChosenMaterial.WOOD ] ) ]] _OBSTACLES = list(filter(lambda filtered_list: len(filtered_list) > 0, [ list(filter(lambda item: item.obstacle, object_list)) for object_list in _NOT_PICKUPABLES ])) _OCCLUDERS = list(filter(lambda filtered_list: len(filtered_list) > 0, [ list(filter(lambda item: item.occluder, object_list)) for object_list in _NOT_PICKUPABLES ])) _STACK_TARGETS = list(filter(lambda filtered_list: len(filtered_list) > 0, [ list(filter(lambda item: item.stackTarget, object_list)) for object_list in _NOT_PICKUPABLES ])) _ALL = _PICKUPABLES + _NOT_PICKUPABLES def _get(prop: str) -> Union[ ObjectDefinition, List[ObjectDefinition], List[List[ObjectDefinition]] ]: """Returns a deep copy of the global property with the given name (normally either an object definition or an object definition list).""" return copy.deepcopy(globals()['_' + prop]) def get_container_definition_dataset( # We should only ever set unshuffled to True in a unit test. unshuffled: bool = False ) -> DefinitionDataset: """Returns an immutable dataset of all container definitions.""" return get_dataset( _get('CONTAINERS'), 'CONTAINERS', unshuffled=unshuffled ) def get_container_open_topped_definition_dataset( # We should only ever set unshuffled to True in a unit test. unshuffled: bool = False ) -> DefinitionDataset: """Returns an immutable dataset of all open topped container definitions.""" return get_dataset( _get('CONTAINERS_OPEN_TOPPED'), 'CONTAINERS_OPEN_TOPPED', unshuffled=unshuffled ) def get_interactable_definition_dataset( # We should only ever set unshuffled to True in a unit test. unshuffled: bool = False ) -> DefinitionDataset: """Returns an immutable dataset of all interactable definitions.""" return get_dataset(_get('ALL'), 'ALL', unshuffled=unshuffled) def get_non_pickupable_definition_dataset( # We should only ever set unshuffled to True in a unit test. unshuffled: bool = False ) -> DefinitionDataset: """Returns an immutable dataset of all non-pickupable definitions.""" return get_dataset( _get('NOT_PICKUPABLES'), 'NOT_PICKUPABLES', unshuffled=unshuffled ) def get_obstacle_definition_dataset( # We should only ever set unshuffled to True in a unit test. 
unshuffled: bool = False ) -> DefinitionDataset: """Returns an immutable dataset of all obstacle definitions.""" return get_dataset( _get('OBSTACLES'), 'OBSTACLES', unshuffled=unshuffled ) def get_occluder_definition_dataset( # We should only ever set unshuffled to True in a unit test. unshuffled: bool = False ) -> DefinitionDataset: """Returns an immutable dataset of all occluder definitions.""" return get_dataset( _get('OCCLUDERS'), 'OCCLUDERS', unshuffled=unshuffled ) def get_pickupable_definition_dataset( # We should only ever set unshuffled to True in a unit test. unshuffled: bool = False ) -> DefinitionDataset: """Returns an immutable dataset of all pickupable definitions.""" return get_dataset( _get('PICKUPABLES'), 'PICKUPABLES', unshuffled=unshuffled ) def get_stack_target_definition_dataset( # We should only ever set unshuffled to True in a unit test. unshuffled: bool = False ) -> DefinitionDataset: """Returns an immutable dataset of all stack target definitions.""" return get_dataset( _get('STACK_TARGETS'), 'STACK_TARGETS', unshuffled=unshuffled )
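# A minimal usage sketch of the getter functions defined above; the module
# name "specific_objects" is an assumption, everything else comes from this
# file. Each getter deep-copies the relevant definition lists via _get() and
# wraps them in an immutable DefinitionDataset through get_dataset().
import specific_objects

pickupables = specific_objects.get_pickupable_definition_dataset()
containers = specific_objects.get_container_definition_dataset()

# Per the inline comments, unshuffled=True is intended only for unit tests.
occluders = specific_objects.get_occluder_definition_dataset(unshuffled=True)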
from django.contrib.auth.base_user import BaseUserManager from django.db import models from django.contrib.auth.models import AbstractBaseUser from django.contrib.auth.models import PermissionsMixin from django.utils import timezone from django.utils.translation import gettext_lazy as _ from django.contrib.auth.validators import UnicodeUsernameValidator class MyUserManager(BaseUserManager): use_in_migrations = True def _create_user(self, username, password, **extra_fields): """ Create and save a user with the given username, email, and password. """ if not username: raise ValueError('The given username must be set') username = self.model.normalize_username(username) user = self.model(username=username, **extra_fields) user.set_password(password) user.save(using=self._db) return user def create_user(self, username, password=None, **extra_fields): extra_fields.setdefault('is_staff', False) extra_fields.setdefault('is_superuser', False) return self._create_user(username, password, **extra_fields) def create_superuser(self, username, password=None, **extra_fields): extra_fields.setdefault('is_staff', True) extra_fields.setdefault('is_superuser', True) if extra_fields.get('is_staff') is not True: raise ValueError('Superuser must have is_staff=True.') if extra_fields.get('is_superuser') is not True: raise ValueError('Superuser must have is_superuser=True.') return self._create_user(username, password, **extra_fields) class User(AbstractBaseUser, PermissionsMixin): pgp_public = models.TextField(null=False, blank=False) two_factor_auth = models.BooleanField(default=False) pgp_private = models.TextField(null=True, blank=True) username_validator = UnicodeUsernameValidator() username = models.CharField( _('username'), max_length=150, unique=True, help_text=_( 'Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.'), validators=[username_validator], error_messages={ 'unique': ("A user with that username already exists."), }, ) is_staff = models.BooleanField( _('staff status'), default=False, help_text=_( 'Designates whether the user can log into this admin site.'), ) is_active = models.BooleanField( _('active'), default=True, help_text=_( 'Designates whether this user should be treated as active. ' 'Unselect this instead of deleting accounts.' ), ) date_joined = models.DateTimeField(_('date joined'), default=timezone.now) objects = MyUserManager() USERNAME_FIELD = 'username' REQUIRED_FIELDS = [] def __str__(self): return self.username
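# A minimal sketch of exercising the custom user model above, e.g. from
# "python manage.py shell"; the "accounts.models" import path is an
# assumption, and the PGP key text is a placeholder.
from accounts.models import User

user = User.objects.create_user(
    username='alice',
    password='s3cret',
    pgp_public='-----BEGIN PGP PUBLIC KEY BLOCK-----...',  # required field
)

admin = User.objects.create_superuser(
    username='admin',
    password='s3cret',
    pgp_public='...',  # placeholder
)
assert admin.is_staff and admin.is_superuser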
def binary_to_string(binary):
    """Decode a string of '0'/'1' characters, eight bits per character, into text."""
    return ''.join(chr(int(binary[a:a + 8], 2)) for a in range(0, len(binary), 8))
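# Quick sanity check for the decoder above: 0b01001000 is 'H' and
# 0b01101001 is 'i'.
assert binary_to_string('0100100001101001') == 'Hi'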
import pandas as pd

df = pd.read_csv("values.csv")
col = '_file_'

for index, row in df.iterrows():
    # Zero-pad the "dir" value to three digits and build "<id>_<dir>.bmp".
    padded_dir = str(row["dir"]).zfill(3)
    rep = f'{row["id"]}_{padded_dir}.bmp'
    print(rep)
    df.at[index, col] = rep

df.to_csv('image_clean.csv', index=False, sep=',')
from .Lexicon import Lexicon
from .Stories import Stories

__all__ = ['Lexicon', 'Stories']
from core.modules.OTModule import OTModule from pyforms import BaseWidget from PyQt4 import QtGui from PyQt4 import QtCore import plugins import sys import pickle import os import traceback class OTModuleProject(OTModule): """ Implements the project management module """ _controller = None #: modules controller _projectDir = None #: Project dir _modules = {} #: Dict of the project modules _modulesOrdered = [] #: List of the project modules ordered def __init__(self, controller, name = None, dir = None): OTModule.__init__(self, name) self._modules = {} self._modulesOrdered = [] self._projectDir = dir self.controller = controller ############################################################################ ############ Project functions ############################################ ############################################################################ def openProject(self, projectDir, file): """ Open a project @param projectDir: Project directory @type projectDir: String @param file: Project file name @type file: String """ #try: self._projectDir = projectDir filename = '%s/%s' % (self._projectDir, file) try: pkl_file = open(filename, 'rb'); project_data = pickle.load(pkl_file); pkl_file.close() except Exception, err: print str(err) if( os.path.isfile(filename+'.bak') ): QtGui.QMessageBox.critical(None, "Error", "Sorry there was an error importing the project!\nWe detect a backup from a previous version of the project, and we will import it instead!") #Open the backup file in case of error pkl_file = open(filename+".bak", 'rb'); project_data = pickle.load(pkl_file); pkl_file.close() data = dict(project_data) self.load(project_data) self.moduleUpdatedEvent(self) return data def saveProject(self, project_data = {} ): """Save project""" filename = '%s/%s' % (self._projectDir, "project.ot") if( os.path.isfile(filename) ): try: os.remove(filename+".bak") except Exception, err: print str(err) os.rename(filename, filename+".bak" ) self.project_data = project_data self.save(project_data) output = open(filename, 'wb') pickle.dump(self.project_data, output) output.close() def findModulesWithName(self, name): haveSameName = [] for mod in self.modulesOrdered: if mod.name==name: haveSameName.append(mod) return haveSameName def __add__(self, module): """ Add module to the project @param module: Module object @type module: OTModule """ #check if the name exists: haveSameName = self.findModulesWithName(module.name) if len(haveSameName)>0: module.name += ' '+str(len(haveSameName)) #################################### module.parentModule = self self._modules[module.uid] = module self._modulesOrdered.append(module) self.addModuleEvent(module) self.moduleUpdatedEvent(self) for mod in self._modulesOrdered: mod.updateControls() return self def __sub__(self, module): """ Remove module from the project @param module: Module object @type module: OTModule """ del self._modules[module.uid] self._modulesOrdered.remove(module) self.moduleUpdatedEvent(self) for mod in self._modulesOrdered: mod.updateControls() return self ############################################################################ ############ Parent class functions reemplementation ####################### ############################################################################ def close(self): """ OTModule.close reimplementation """ for mod in self.modulesOrdered: print "Closing module %s" % mod.name mod.close() def save(self, saver): """ OTModule.saveContent reimplementation """ OTModule.save(self, saver) saver['childs'] = [] for module in 
self.modulesOrdered: dataToSave = {} module.save(dataToSave) saver['childs'].append(dataToSave) def load(self, loader): """ OTModule.loadContent reimplementation """ OTModule.load(self, loader) for saved in loader['childs']: module = saved['class'](saved['name']) self += (module) if isinstance(module, BaseWidget): module.initForm() uid2Remove = self._modulesOrdered[-1].uid module.load(saved) del self._modules[uid2Remove] self._modules[module.uid] = module ############################################################################ ############ Events ######################################################## ############################################################################ def moduleUpdatedEvent(self, module): """ Event called when a project module is updated @param module: the project was updated @type module: OTModuleProject """ pass def addModuleEvent(self, module): """ Event called when a module is added to the project @param module: A new module was added to the project @type module: OTModuleProject """ pass ############################################################################ ############ Properties #################################################### ############################################################################ @property def modulesOrdered(self): """Return all the project modules ordered""" if self._modulesOrdered == None: return [] else: return self._modulesOrdered ############################################################################ @property def controller(self): """Return the controller""" return self._controller @controller.setter def controller(self, value): self._controller = value
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # Author: Jie Yang, Wei wu, Xiaoy LI # Last update: 2019.03.12 # First create: 2017.06.15 # Concate: # import os import sys root_path = "/".join(os.path.realpath(__file__).split("/")[:-3]) if root_path not in sys.path: sys.path.insert(0, root_path) import torch import torch.optim as optim import torch.autograd as autograd import gc import time import logging import argparse import datetime import numpy as np from glyce.model.latticeLSTM.model.bilstmcrf import BiLSTMCRF as SeqModel from glyce.model.latticeLSTM.utils.data import Data from glyce.model.latticeLSTM.utils.metric import get_ner_fmeasure parser = argparse.ArgumentParser(description='Tuning with bi-directional LSTM-CRF') parser.add_argument('--status', choices=['train', 'test', 'decode'], help='update algorithm', default='train') parser.add_argument('--name', type=str, default='CTB9POS') parser.add_argument('--mode', type=str, default='char') parser.add_argument('--data_dir', type=str, default='/data/nfsdata/nlp/datasets/sequence_labeling/CN_NER/') parser.add_argument('--raw', type=str) parser.add_argument('--loadmodel', type=str) parser.add_argument('--gpu_id', type=int, default=0) parser.add_argument('--gaz_dropout', type=float, default=0.5) parser.add_argument('--HP_lr', type=float, default=0.01) parser.add_argument('--HP_dropout', type=float, default=0.5) parser.add_argument('--HP_use_glyph', action='store_true') parser.add_argument('--HP_glyph_ratio', type=float, default=0.1) parser.add_argument('--HP_font_channels', type=int, default=2) parser.add_argument('--HP_glyph_highway', action='store_true') parser.add_argument('--HP_glyph_layernorm', action='store_true') parser.add_argument('--HP_glyph_batchnorm', action='store_true') parser.add_argument('--HP_glyph_embsize', type=int, default=64) parser.add_argument('--HP_glyph_output_size', type=int, default=64) parser.add_argument('--HP_glyph_dropout', type=float, default=0.7) parser.add_argument('--HP_glyph_cnn_dropout', type=float, default=0.5) parser.add_argument('--setting_str', type=str, default='') parser.add_argument('--src_folder', type=str, default='/data/nfsdata/nlp/projects/wuwei') args = parser.parse_args() os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id) save_dir = F'{args.src_folder}/{args.setting_str}.' 
if not os.path.isdir(save_dir): os.makedirs(save_dir) logger = logging.getLogger() # pylint: disable=invalid-name logger.setLevel(logging.DEBUG) fh = logging.FileHandler(os.path.join(save_dir, 'run.log')) fh.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') fh.setFormatter(formatter) ch.setFormatter(formatter) logger.addHandler(fh) logger.addHandler(ch) logger.info(args) seed_num = 42 random.seed(seed_num) torch.manual_seed(seed_num) np.random.seed(seed_num) def data_initialization(data, gaz_file, train_file, dev_file, test_file): data.build_alphabet(train_file) data.build_alphabet(dev_file) data.build_alphabet(test_file) data.build_gaz_file(gaz_file) data.build_gaz_alphabet(train_file) data.build_gaz_alphabet(dev_file) data.build_gaz_alphabet(test_file) data.fix_alphabet() return data def predict_check(pred_variable, gold_variable, mask_variable): """ input: pred_variable (batch_size, sent_len): pred tag result, in numpy format gold_variable (batch_size, sent_len): gold result variable mask_variable (batch_size, sent_len): mask variable """ pred = pred_variable.cpu().data.numpy() gold = gold_variable.cpu().data.numpy() mask = mask_variable.cpu().data.numpy() overlaped = (pred == gold) right_token = np.sum(overlaped * mask) total_token = mask.sum() return right_token, total_token def recover_label(pred_variable, gold_variable, mask_variable, label_alphabet, word_recover): """ input: pred_variable (batch_size, sent_len): pred tag result gold_variable (batch_size, sent_len): gold result variable mask_variable (batch_size, sent_len): mask variable """ pred_variable = pred_variable[word_recover] gold_variable = gold_variable[word_recover] mask_variable = mask_variable[word_recover] seq_len = gold_variable.size(1) mask = mask_variable.cpu().data.numpy() pred_tag = pred_variable.cpu().data.numpy() gold_tag = gold_variable.cpu().data.numpy() batch_size = mask.shape[0] pred_label = [] gold_label = [] for idx in range(batch_size): pred = [label_alphabet.get_instance(pred_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0] gold = [label_alphabet.get_instance(gold_tag[idx][idy]) for idy in range(seq_len) if mask[idx][idy] != 0] # logger.info "p:",pred, pred_tag.tolist() # logger.info "g:", gold, gold_tag.tolist() assert(len(pred)==len(gold)) pred_label.append(pred) gold_label.append(gold) return pred_label, gold_label def load_data_setting(save_dir): with open(save_dir, 'rb') as fp: data = torch.load(fp) logger.info("Data setting loaded from file: " + save_dir) data.show_data_summary() return data def lr_decay(optimizer, epoch, decay_rate, init_lr): lr = init_lr * ((1-decay_rate)**epoch) logger.info(F" Learning rate is setted as: {lr}") for param_group in optimizer.param_groups: param_group['lr'] = lr return optimizer def evaluate(data, model, name): if name == "train": instances = data.train_Ids elif name == "dev": instances = data.dev_Ids elif name == 'test': instances = data.test_Ids elif name == 'raw': instances = data.raw_Ids else: logger.info("Error: wrong evaluate name," + name) pred_results = [] gold_results = [] model.eval() start_time = time.time() train_num = len(instances) total_batch = train_num//data.HP_batch_size+1 for batch_id in range(total_batch): start = batch_id*data.HP_batch_size end = (batch_id+1)*data.HP_batch_size if end > train_num: end = train_num instance = instances[start:end] if not instance: continue gaz_list, batch_word, batch_biword, 
batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask = batchify_with_label(instance, data.HP_gpu, True) tag_seq = model(gaz_list,batch_word, batch_biword, batch_wordlen, batch_char, batch_charlen, batch_charrecover, mask) # logger.info("tag_seq", tag_seq) pred_label, gold_label = recover_label(tag_seq, batch_label, mask, data.label_alphabet, batch_wordrecover) pred_results += pred_label gold_results += gold_label decode_time = time.time() - start_time speed = len(instances)/decode_time acc, p, r, f = get_ner_fmeasure(gold_results, pred_results, data.tagScheme) return speed, acc, p, r, f, pred_results def batchify_with_label(input_batch_list, gpu, volatile_flag=False): """ input: list of words, chars and labels, various length. [[words,biwords,chars,gaz, labels],[words,biwords,chars,labels],...] words: word ids for one sentence. (batch_size, sent_len) chars: char ids for on sentences, various length. (batch_size, sent_len, each_word_length) output: zero padding for word and char, with their batch length word_seq_tensor: (batch_size, max_sent_len) Variable word_seq_lengths: (batch_size,1) Tensor char_seq_tensor: (batch_size*max_sent_len, max_word_len) Variable char_seq_lengths: (batch_size*max_sent_len,1) Tensor char_seq_recover: (batch_size*max_sent_len,1) recover char sequence order label_seq_tensor: (batch_size, max_sent_len) mask: (batch_size, max_sent_len) """ batch_size = len(input_batch_list) words = [sent[0] for sent in input_batch_list] biwords = [sent[1] for sent in input_batch_list] chars = [sent[2] for sent in input_batch_list] gazs = [sent[3] for sent in input_batch_list] labels = [sent[4] for sent in input_batch_list] word_seq_lengths = torch.LongTensor(list(map(len, words))) max_seq_len = word_seq_lengths.max() word_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len)), volatile = volatile_flag).long() biword_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len)), volatile = volatile_flag).long() label_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len)),volatile = volatile_flag).long() mask = autograd.Variable(torch.zeros((batch_size, max_seq_len)),volatile = volatile_flag).byte() for idx, (seq, biseq, label, seqlen) in enumerate(zip(words, biwords, labels, word_seq_lengths)): word_seq_tensor[idx, :seqlen] = torch.LongTensor(seq) biword_seq_tensor[idx, :seqlen] = torch.LongTensor(biseq) label_seq_tensor[idx, :seqlen] = torch.LongTensor(label) mask[idx, :seqlen] = torch.Tensor([1]*seqlen) word_seq_lengths, word_perm_idx = word_seq_lengths.sort(0, descending=True) word_seq_tensor = word_seq_tensor[word_perm_idx] biword_seq_tensor = biword_seq_tensor[word_perm_idx] ## not reorder label label_seq_tensor = label_seq_tensor[word_perm_idx] mask = mask[word_perm_idx] ### deal with char # pad_chars (batch_size, max_seq_len) pad_chars = [chars[idx] + [[0]] * (max_seq_len-len(chars[idx])) for idx in range(len(chars))] length_list = [list(map(len, pad_char)) for pad_char in pad_chars] max_word_len = max(list(map(max, length_list))) char_seq_tensor = autograd.Variable(torch.zeros((batch_size, max_seq_len, max_word_len)), volatile = volatile_flag).long() char_seq_lengths = torch.LongTensor(length_list) for idx, (seq, seqlen) in enumerate(zip(pad_chars, char_seq_lengths)): for idy, (word, wordlen) in enumerate(zip(seq, seqlen)): # logger.info len(word), wordlen char_seq_tensor[idx, idy, :wordlen] = torch.LongTensor(word) char_seq_tensor = 
char_seq_tensor[word_perm_idx].view(batch_size*max_seq_len,-1) char_seq_lengths = char_seq_lengths[word_perm_idx].view(batch_size*max_seq_len,) char_seq_lengths, char_perm_idx = char_seq_lengths.sort(0, descending=True) char_seq_tensor = char_seq_tensor[char_perm_idx] _, char_seq_recover = char_perm_idx.sort(0, descending=False) _, word_seq_recover = word_perm_idx.sort(0, descending=False) # keep the gaz_list in orignial order gaz_list = [ gazs[i] for i in word_perm_idx] gaz_list.append(volatile_flag) if gpu: word_seq_tensor = word_seq_tensor.cuda() biword_seq_tensor = biword_seq_tensor.cuda() word_seq_lengths = word_seq_lengths.cuda() word_seq_recover = word_seq_recover.cuda() label_seq_tensor = label_seq_tensor.cuda() char_seq_tensor = char_seq_tensor.cuda() char_seq_recover = char_seq_recover.cuda() mask = mask.cuda() return gaz_list, word_seq_tensor, biword_seq_tensor, word_seq_lengths, word_seq_recover, char_seq_tensor, char_seq_lengths, char_seq_recover, label_seq_tensor, mask def train(data, save_model_dir, seg=True): logger.info("Training model...") data.show_data_summary() model = SeqModel(data) logger.info("finished built model.") parameters = [p for p in model.parameters() if p.requires_grad] optimizer = optim.SGD(parameters, lr=data.HP_lr, momentum=data.HP_momentum) best_dev = -1 data.HP_iteration = 100 # start training for idx in range(data.HP_iteration): epoch_start = time.time() temp_start = epoch_start logger.info(("Epoch: %s/%s" %(idx,data.HP_iteration))) optimizer = lr_decay(optimizer, idx, data.HP_lr_decay, data.HP_lr) instance_count = 0 sample_loss = 0 batch_loss = 0 total_loss = 0 right_token = 0 whole_token = 0 random.shuffle(data.train_Ids) model.train() model.zero_grad() train_num = len(data.train_Ids) total_batch = train_num//data.HP_batch_size+1 for batch_id in range(total_batch): start = batch_id*data.HP_batch_size end = min((batch_id+1)*data.HP_batch_size, train_num) instance = data.train_Ids[start:end] if not instance: continue gaz_list, batch_word, batch_biword, batch_wordlen, batch_wordrecover, batch_char, batch_charlen, batch_charrecover, batch_label, mask = batchify_with_label(instance, data.HP_gpu) instance_count += 1 loss, tag_seq = model.neg_log_likelihood_loss(gaz_list, batch_word, batch_biword, batch_wordlen, batch_char, batch_charlen, batch_charrecover, batch_label, mask) right, whole = predict_check(tag_seq, batch_label, mask) right_token += right whole_token += whole sample_loss += loss.data[0] total_loss += loss.data[0] batch_loss += loss if end % 500 == 0: temp_time = time.time() temp_cost = temp_time - temp_start temp_start = temp_time logger.info((" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f" % (end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))) sys.stdout.flush() sample_loss = 0 if end % data.HP_batch_size == 0: batch_loss.backward() optimizer.step() model.zero_grad() batch_loss = 0 temp_time = time.time() temp_cost = temp_time - temp_start logger.info((" Instance: %s; Time: %.2fs; loss: %.4f; acc: %s/%s=%.4f" % (end, temp_cost, sample_loss, right_token, whole_token,(right_token+0.)/whole_token))) epoch_finish = time.time() epoch_cost = epoch_finish - epoch_start logger.info(("Epoch: %s training finished. 
Time: %.2fs, speed: %.2fst/s, total loss: %s" % (idx, epoch_cost, train_num/epoch_cost, total_loss))) speed, acc, p, r, f, _ = evaluate(data, model, "dev") dev_finish = time.time() dev_cost = dev_finish - epoch_finish if seg: current_score = f logger.info(("Dev: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f" % (dev_cost, speed, acc, p, r, f))) else: current_score = acc logger.info(("Dev: time: %.2fs speed: %.2fst/s; acc: %.4f"%(dev_cost, speed, acc))) if current_score > best_dev: if seg: logger.info(F"Exceed previous best f score: {best_dev}") else: logger.info(F"Exceed previous best acc score: {best_dev}") model_name = os.path.join(save_model_dir, 'saved.model') torch.save(model.state_dict(), model_name) best_dev = current_score # ## decode test # speed, acc, p, r, f, _ = evaluate(data, model, "test") # test_finish = time.time() # test_cost = test_finish - dev_finish # if seg: # logger.info(("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%(test_cost, speed, acc, p, r, f))) # else: # logger.info(("Test: time: %.2fs, speed: %.2fst/s; acc: %.4f"%(test_cost, speed, acc))) gc.collect() def load_model_decode(save_dir, data): logger.info("Load Model from file: " + save_dir) model = SeqModel(data) model.load_state_dict(torch.load(save_dir)) logger.info(F"Decode dev data ...") start_time = time.time() speed, acc, p, r, f, pred_results = evaluate(data, model, 'dev') end_time = time.time() time_cost = end_time - start_time logger.info(("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%('dev', time_cost, speed, acc, p, r, f))) logger.info(F"Decode test data ...") start_time = time.time() speed, acc, p, r, f, pred_results = evaluate(data, model, 'test') end_time = time.time() time_cost = end_time - start_time logger.info(("%s: time:%.2fs, speed:%.2fst/s; acc: %.4f, p: %.4f, r: %.4f, f: %.4f"%('test', time_cost, speed, acc, p, r, f))) if __name__ == '__main__': char_emb = '/data/nfsdata/nlp/embeddings/chinese/gigaword/gigaword_chn.all.a2b.uni.ite50.vec' bichar_emb = '' # bichar_emb = '/data/nfsdata/nlp/embeddings/chinese/gigaword/gigaword_chn.all.a2b.bi.ite50.vec' ctb_gaz = '/data/nfsdata/nlp/embeddings/chinese/ctb/ctb.50d.vec' # NER wiki_gaz = '/data/nfsdata/nlp/embeddings/chinese/wiki/zh.wiki.bpe.vs200000.d50.w2v.txt' gaz_file = ctb_gaz if 'NER' in args.name else wiki_gaz train_file = F'{args.data_dir}/{args.name}/train.{args.mode}.bmes' dev_file = F'{args.data_dir}/{args.name}/dev.{args.mode}.bmes' test_file = F'{args.data_dir}/{args.name}/test.{args.mode}.bmes' logger.info("Train file:" + train_file) logger.info("Dev file:" + dev_file) logger.info("Test file:" + test_file) logger.info("Char emb:" + char_emb) logger.info("Bichar emb:" + bichar_emb) logger.info("Gaz file:" + gaz_file) logger.info("Save dir:" + save_dir) sys.stdout.flush() if args.status == 'train': data = Data() data.HP_use_char = False data.use_bigram = False if 'NER' in args.name else True # ner: False, cws: True data.gaz_dropout = args.gaz_dropout data.HP_lr = 0.015 if 'NER' in args.name else 0.01 data.HP_dropout = args.HP_dropout data.HP_use_glyph = args.HP_use_glyph data.HP_glyph_ratio = args.HP_glyph_ratio data.HP_font_channels = args.HP_font_channels data.HP_glyph_highway = args.HP_glyph_highway data.HP_glyph_embsize = args.HP_glyph_embsize data.HP_glyph_output_size = args.HP_glyph_output_size data.HP_glyph_dropout = args.HP_glyph_dropout data.HP_glyph_cnn_dropout = args.HP_glyph_cnn_dropout data.HP_glyph_batchnorm = args.HP_glyph_batchnorm 
data.HP_glyph_layernorm = args.HP_glyph_layernorm data.norm_gaz_emb = False if 'NER' in args.name else True # ner: False, cws: True data.HP_fix_gaz_emb = False data_initialization(data, gaz_file, train_file, dev_file, test_file) data.generate_instance_with_gaz(train_file, 'train') data.generate_instance_with_gaz(dev_file, 'dev') data.generate_instance_with_gaz(test_file, 'test') data.build_word_pretrain_emb(char_emb) data.build_biword_pretrain_emb(bichar_emb) data.build_gaz_pretrain_emb(gaz_file) torch.save(data, save_dir + '/data.set') data = torch.load(save_dir + '/data.set') train(data, save_dir) elif args.status == 'test': data = load_data_setting(args.loadmodel + '/data.set') load_model_decode(args.loadmodel + '/saved.model', data) # load_model_decode(args.loadmodel + '/saved.model', data, 'test') # elif args.status == 'decode': # data = load_data_setting(args.loadmodel + '/data.set') # data.generate_instance_with_gaz(args.raw, 'raw') # decode_results = load_model_decode(args.loadmodel + '/saved.model', data, 'raw') # data.write_decoded_results(args.loadmodel + '/decoded.output', decode_results, 'raw') else: logger.info("Invalid argument! Please use valid arguments! (train/test/decode)")
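# The lr_decay() helper above applies an exponential schedule,
# lr = init_lr * (1 - decay_rate) ** epoch. A small self-contained
# illustration (0.05 is an example decay rate, not a value from the script):
init_lr, decay_rate = 0.015, 0.05
for epoch in range(3):
    lr = init_lr * ((1 - decay_rate) ** epoch)
    print(f'epoch {epoch}: lr = {lr:.6f}')  # decays geometrically: 0.015, ~0.01425, ~0.01354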
# Importing necessary modules & libraries
from django.urls import path

from core import views

# Setting the app_name and url paths
app_name = "core"

urlpatterns = [
    path('', views.index, name="index"),
    path('analytics', views.analytics, name="analytics"),
    path('user_login', views.user_login, name="user_login"),
    path('user_logout', views.user_logout, name="user_logout"),
    path('download_BUF', views.download_BUF, name="download_BUF")
]
#!/usr/bin/env python import string import random import uuid import logging from flask import Flask from flask import request, jsonify from tacker_agent import * from shutil import rmtree from database import DatabaseConnection from copy import deepcopy from werkzeug.contrib.cache import MemcachedCache # LOGGING # basicConfig sets up all the logs from libraries logging.basicConfig(level=logging.INFO, format="%(asctime)s %(levelname)-8s %(name)-12s %(message)s") logger = logging.getLogger('core') # logger.setLevel(logging.INFO) # # create console handler and set level to debug # ch = logging.StreamHandler() # # ch.setLevel(logging.DEBUG) # # create formatter # formatter = logging.Formatter('%(asctime)s - %(levelname)-8s - %(name)-20s : %(message)s') # # add formatter to ch # ch.setFormatter(formatter) # # add ch to logger # logger.addHandler(ch) # # LOGGING conf up to here tacker_agent = TackerAgent() app = Flask(__name__) def unique_id(): # return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(8)) return str(uuid.uuid4()) def vnfd_json_yaml_parser(vnfd): try: vnfd = json.loads(vnfd) except ValueError: try: raw_vnfd = yaml.load(vnfd) try: attr_vnfd = dict() attr_vnfd['tosca_definitions_version'] = raw_vnfd['tosca_definitions_version'] attr_vnfd['metadata'] = raw_vnfd['metadata'] attr_vnfd['description'] = raw_vnfd['description'] attr_vnfd['topology_template'] = raw_vnfd['topology_template'] attributes = {'vnfd': attr_vnfd} head = dict() head['description'] = raw_vnfd['description'] head['service_types'] = [{'service_type': 'vnfd'}] head['attributes'] = attributes vnfd = dict() vnfd['vnfd'] = head except KeyError as e: msg = "YAML error format: %s" % e logger.error(msg) return ERROR, msg except yaml.YAMLError: msg = "VNFD should be in JSON or YAML format!" 
logger.error(msg) return ERROR, msg vnfd['vnfd']['name'] = unique_id() return OK, vnfd @app.route('/package', methods=['POST']) def include_package(): """ Includes a VNF Package in the local catalog and in the file repository :return: an 'OK' JSON status """ # TODO: consider also upload the VNF Image to the Openstack Tacker database = DatabaseConnection() vnfd = request.json['vnfd'] # vnf = request.json['vnf'] descriptor = json.loads(request.json['descriptor']) category = descriptor['category'] vnf_type = descriptor['type'] vnf_description = descriptor['description'] vnfd_status, data = vnfd_json_yaml_parser(vnfd) if vnfd_status == OK: vnfd = data else: return jsonify({'status': vnfd_status, 'reason': data}) vnfd_name = vnfd['vnfd']['name'] if vnf_type not in [CLICK_VNF, GENERAL_VNF]: return jsonify({'status': ERROR, 'reason': 'VNF Package unknown type: %s' % vnf_type}) dir_id = unique_id() dir_name = 'repository/' + dir_id os.makedirs(dir_name) vnfd = json.dumps(vnfd, indent=2, sort_keys=True) vnfd_file = open(dir_name + '/vnfd.json', 'w') vnfd_file.write(vnfd) vnfd_file.close() if vnf_type == CLICK_VNF: vnf = request.json['vnf'] vnf_file = open(dir_name + '/vnf.click', 'w') vnf_file.write(vnf) vnf_file.close() # TODO: needs to add a function name result = database.insert_vnf_package(category, vnfd_name, dir_id, vnf_type, vnf_description) if result == OK: return jsonify({'status': OK}) else: return jsonify({'status': result[0], 'reason': result[1]}) @app.route('/package/<pkg_id>', methods=['DELETE']) def delete_package(pkg_id): """ Remove a VNF Package from the local catalog and its repository files :param pkg_id: VNF Package identifier :return: a JSON with status and error reason if it occurs """ database = DatabaseConnection() _, data = database.list_vnf_instances(vnf_pkg_id=pkg_id) if len(data) > 0: return jsonify({'status': ERROR, 'reason': "A VNF Instance depends on this VNF Package!"}) resp, catalog = database.list_catalog(pkg_id) pkg_dir_id = catalog[0]['dir_id'] if resp == OK: result = database.remove_vnf_package(pkg_id) if result == OK: try: rmtree('repository/' + pkg_dir_id) except OSError as e: logger.error("%s '%s'", e.strerror, e.filename) return jsonify({'status': ERROR, 'reason': e.strerror}) return jsonify({'status': OK}) return jsonify({'status': ERROR, 'reason': status[404]}) @app.route('/catalog', methods=['GET']) def list_catalog(): database = DatabaseConnection() _, vnfs = database.list_catalog() return jsonify({'vnfs': vnfs}) @app.route('/vnfs/<vnf_pkg_id>', methods=['POST']) def instantiate_vnf(vnf_pkg_id): """ Instantiates a given VNF in NFVO :param vnf_pkg_id: :return: flask.Response: OK, vnfd_id, vnf_id and vnf_ip if success, or ERROR and its reason if not """ database = DatabaseConnection() resp, catalog = database.list_catalog(vnf_pkg_id=vnf_pkg_id) if resp != OK: jsonify({'status': ERROR, 'reason': 'VNF Package not found!'}) dir_id = catalog[0]['dir_id'] vnfd_name = catalog[0]['vnfd_name'] vnf_type = catalog[0]['vnf_type'] vnfd_path = 'repository/%s/vnfd.json' % dir_id with open(vnfd_path) as vnfd_file: vnfd_data = vnfd_file.read() vnfd_data = json.loads(vnfd_data) vnfd_data['vnfd']['name'] = vnfd_name if vnf_type == CLICK_VNF: function_path = 'repository/%s/vnf.click' % dir_id with open(function_path) as function_file: function_data = function_file.read() response = tacker_agent.vnf_create(vnfd_data, unique_id(), function_data) else: response = tacker_agent.vnf_create(vnfd_data, unique_id()) if response['status'] != OK: return jsonify(response) db_res, data = 
database.insert_vnf_instance(vnf_pkg_id, response['vnfd_id'], response['vnf_id']) # Rollback actions if database inserting fails if db_res != OK: error_message = 'Database error: %s' % data logger.error("Executing rollback actions...\n%s", error_message) resp_delete = tacker_agent.vnf_delete(response['vnf_id']) if resp_delete == OK: logger.info("Rollback done!") else: error_message.join([' ', resp_delete['reason']]) logger.error(error_message) return jsonify({'status': ERROR, 'reason': error_message}) # return instantiated vnf data return jsonify(response) @app.route('/vnfs/<vnf_id>', methods=['DELETE']) def destroy_vnf(vnf_id): """Destroys a given VNF in NFVO :param vnf_id: the NFVO VNF's ID :return: OK if success, or ERROR and its reason if not """ database = DatabaseConnection() response = tacker_agent.vnf_delete(vnf_id) if response['status'] != OK: return jsonify(response) res, vnf_instance = database.list_vnf_instances(vnf_id=vnf_id) if res != OK: return jsonify(response) if len(vnf_instance) > 0: database.remove_vnf_instance(vnf_instance[0]['_id']) return jsonify({'status': OK}) @app.route('/vnfs', methods=['GET']) def list_vnfs(): """List all instantiated VNFs in NFVO""" response = tacker_agent.vnf_list() return jsonify({'vnfs': response}) def get_vnf_nfvo_resource_id(vnf_id, resource_name): response = tacker_agent.vnf_resources(vnf_id) if response['status'] != OK: return response for resource in response['resources']: if resource['name'] == resource_name: return { 'status': OK, 'resource_id': resource['id'] } return { 'status': ERROR, 'reason': 'VNF Resource ID not found!' } @app.route('/vnfs/package/<vnf_id>', methods=['GET']) def get_vnf_package(vnf_id): """Retrieves a VNF Package stored in Catalog from a given NFVO VNF ID :param vnf_id: :return: """ database = DatabaseConnection() res, data = database.list_vnf_instances(vnf_id=vnf_id) if res == OK: if data: return jsonify({'status': OK, 'package': data[0]}) else: return jsonify({'status': ERROR, 'reason': 'VNF Package not found in Catalog!'}) # if happens a database error return jsonify({'status': ERROR, 'reason': data}) @app.route('/sfc/acl', methods=['GET']) def list_acl(): # TODO: check the last ACL criteria in Tacker API # TODO: check whether we can get all ACL criteria directly calling the tacker REST API acl_criterias = { 'eth_type': 'Specifies Ethernet frame type (See IEEE 802.3)', 'eth_src': 'Ethernet source address', 'eth_dst': 'Ethernet destination address', 'vlan_id': 'VLAN ID', 'vlan_pcp': 'VLAN Priority', 'mpls_label': 'MPLS Label', 'mpls_tc': 'MPLS Traffic Class', 'ip_dscp': 'IP DSCP (6 bits in ToS field)', 'ip_ecn': 'IP ECN (2 bits in ToS field)', 'ip_src_prefix': 'IP source address prefix', 'ip_dst_prefix': 'IP destination address prefix', 'ip_proto': 'IP protocol number', 'tenant_id': 'OpenStack Tenant ID', 'icmpv4_type': 'ICMP type', 'icmpv4_code': 'ICMP code', 'arp_op': 'ARP opcode', 'arp_spa': 'ARP source ipv4 address', 'arp_tpa': 'ARP target ipv4 address', 'arp_sha': 'ARP source hardware address', 'arp_tha': 'ARP target hardware address', 'ipv6_src': 'IPv6 source address', 'ipv6_dst': 'IPv6 destination address', 'ipv6_flabel': 'IPv6 Flow Label', 'icmpv6_type': 'ICMPv6 type', 'icmpv6_code': 'ICMPv6 code', 'ipv6_nd_target': 'Target address for ND', 'ipv6_nd_sll': 'Source link-layer for ND', 'ipv6_nd_tll': 'Target link-layer for ND', 'destination_port_range': 'Target port range' } return jsonify({'acl': acl_criterias}) @app.route('/sfc/uuid', methods=['GET']) def get_sfc_uuid(): """Retrieves a unique identifier 
in order to compose a SFC Retrieves a uuid4 identifier to compose a SFC and get a copy of the vnffg template. :return: a unique identifier str. """ vnffgd = deepcopy(vnffgd_template) sfc_uuid = str(uuid.uuid4()) cache = MemcachedCache() cache.set(sfc_uuid, vnffgd) return jsonify({'sfc_uuid': sfc_uuid}) @app.route('/vnfp/cps/<vnf_pkg_id>', methods=['GET']) def list_vnf_pkg_cps(vnf_pkg_id, internal=False): """Retrieve all connection points of a VNF Package stored in repository If using an internal call directly to this function use the :param internal param as True. In internal call it returns just an dict. However in external calls it returns an HTTP Response json dict. :param vnf_pkg_id: VNF Package ID :param internal=False used to distinguish internal (function) and external (HTTP) calls to the server :return: a dict with all connection points """ database = DatabaseConnection() _, data = database.list_catalog(vnf_pkg_id) dir_id = data[0]['dir_id'] vnfd_path = 'repository/%s/vnfd.json' % dir_id with open(vnfd_path) as vnfd_file: vnfd_data = vnfd_file.read() vnfd_data = json.loads(vnfd_data) node_templates = vnfd_data['vnfd']['attributes']['vnfd']['topology_template']['node_templates'] cps = {} for atr in node_templates.keys(): if 'tosca.nodes.nfv.CP' in node_templates[atr]['type']: virtual_link = node_templates[atr]['requirements'][0]['virtualLink']['node'] network_name = node_templates[virtual_link]['properties']['network_name'] cps[atr] = {'virtual_link': virtual_link, 'network_name': network_name} if not internal: return jsonify(cps) else: return cps @app.route('/sfc/sfp/compose', methods=['POST']) def compose_sfp(): """Performs VNF Chaining in the VNFFG Template. This function stands for VNF Chaining and its requirements for CPs and VLs using the VNFFG Template. The first interface is reserved for the VNF management interface, and thus it is not used for VNF chaining. The following rules are taken into account: - cp_in: chooses the cp_in according to the same network of the prior cp_out. If the VNF is the first one, then the first CP is chosen (disregarding the management interface) - cp_out: if the given VNF has just one CP for VNF chaining, then cp_out = cp_in. Otherwise, cp_out is chosen taking into account NFVO requirements implemented in the related agents. If cp_out can not be selected automatically, a message with OPTIONS status is returned in order to the user inform the desirable and suitable connection point. 
Input params must be in JSON format, which are: - sfc_uuid: the unique identifier for the SFC being composed - vnf_pkg_id: always required - cp_out: not required, but can be used as a manually user input :return: OK if success, or ERROR and its reason if not, or OPTIONS and an cp_list dict """ vnffgd = None cache = MemcachedCache() if 'sfc_uuid' in request.json: vnffgd = cache.get(request.json['sfc_uuid']) if not vnffgd: return jsonify({'status': ERROR, 'reason': 'SFC UUID not found!'}) database = DatabaseConnection() vnf_pkg_id = request.json['vnf_pkg_id'] vnf_pkg_cps = list_vnf_pkg_cps(vnf_pkg_id, internal=True) _, catalog = database.list_catalog(vnf_pkg_id) vnfd_name = catalog[0]['vnfd_name'] topology_template = vnffgd['vnffgd']['template']['vnffgd']['topology_template'] # verifying if this vnf package was already added to this VNFFG (no duplicate) if vnfd_name in topology_template['groups']['VNFFG1']['properties']['constituent_vnfs']: return jsonify({'status': ERROR, 'reason': 'The selected VNF Package was already added on this SFC!'}) cp_list = sorted(vnf_pkg_cps) # we are considering that the first CP is always reserved for the VNF management interface # Thus, it is not used for VNF chaining cp_list.pop(0) # gets all virtual links in VNFFGD vnffgd_vls = topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link'] # getting the previous network_name for correct VNF chaining previous_net_name = '' if vnffgd_vls: previous_vl = vnffgd_vls[-1] # gets the current last VL in VNFFG # gets the current last VNF Name in VNFFGD previous_vnfd_name = topology_template['groups']['VNFFG1']['properties']['constituent_vnfs'][-1] _, previous_vnf_pkg_id = database.list_catalog(vnfd_name=previous_vnfd_name) previous_vnf_pkg_id = previous_vnf_pkg_id[0]['_id'] # gets all connection points data from previous VNFD previous_vnfd_cps = list_vnf_pkg_cps(previous_vnf_pkg_id, internal=True) for cp in previous_vnfd_cps: if previous_vnfd_cps[cp]['virtual_link'] == previous_vl: previous_net_name = previous_vnfd_cps[cp]['network_name'] break cp_in, cp_out = "", "" # including cp_input for cp in cp_list: if vnffgd_vls: # if there are previous Virtual Links included in VNFFGD # cp_in is valid just if it is connected to the same network_name from previous VNF output if vnf_pkg_cps[cp]['network_name'] == previous_net_name: cp_in = cp break else: # if this VNF is the first one being included in VNFFGD cp_in = cp break if not cp_in: return jsonify({'status': ERROR, 'reason': 'There is no suitable CP to chaining with previous VNF!'}) # including cp_output num_cps = len(cp_list) if num_cps == 1: cp_out = cp_in else: # num_cps surely will be > 1, because previous return # output CP requirements are dependent of NFVO capabilities, thus it was implemented in the related agent result, data = tacker_agent.select_and_validate_cp_out(request.json, vnf_pkg_cps, cp_in) if result != OK: return jsonify(data) cp_out = data if cp_in == cp_out: capability = [cp_in] else: capability = [cp_in, cp_out] for cp in capability: # including connection points topology_template['groups']['VNFFG1']['properties']['connection_point'].append(cp) # including dependent virtual links virtual_link = vnf_pkg_cps[cp]['virtual_link'] # if virtual_link not in topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link']: topology_template['groups']['VNFFG1']['properties']['dependent_virtual_link'].append(virtual_link) # including constituent VNFs 
topology_template['groups']['VNFFG1']['properties']['constituent_vnfs'].append(vnfd_name) vnf_end_points = len(capability) if vnf_end_points == 1: capability = capability[0] else: capability = ','.join(capability) # including number of endpoints topology_template['groups']['VNFFG1']['properties']['number_of_endpoints'] += vnf_end_points path = {"forwarder": vnfd_name, "capability": capability} # including VNF forwarding path topology_template['node_templates']['Forwarding_path1']['properties']['path'].append(path) # debug logger.debug('VNFFGD Template UUID: %s\n%s', request.json['sfc_uuid'], json.dumps(vnffgd, indent=4, sort_keys=True)) cache.set(request.json['sfc_uuid'], vnffgd) return jsonify({'status': OK}) # TODO: consider to move part of this function to tacker_agent if other NFVOs don't need "network_src_port_id" criteria @app.route('/sfc/acl/origin', methods=['POST']) def include_sfc_traffic_origin(): """Includes ACL criteria according to INTERNAL or EXTERNAL traffic source INTERNAL traffic is sourced from VNFs managed by NFVO, while EXTERNAL traffic is sourced from everything out from NFVO networks. This function also includes specific requirements to select the source port for any NFVO. Currently, it just supports Tacker NFVO. Tacker has the requirement for 'network_source_port_id' in ACL criteria, which is included in VNFFGD by this function. One important rule is applied: 1. Tacker's network_name from the origin VNF CP must be the same as the input CP of the first VNF in the chain. If there are more CPs than 1, then a message with status OPTIONS and a cp_list is replied in order to the user inform a desirable connection point. JSON input arguments are: - sfc_uuid: the unique identifier to the SFC being composed - origin: if the SFC traffic source is INTERNAL or EXTERNAL - vnf_id: the VNF unique identifier from the NFVO - resource: optional when using INTERNAL origin. 
Identifies the user manually input of the cp_out :return: OK if success, or ERROR and its reason if not, or OPTIONS and a cp_list dict """ vnffgd = None cache = MemcachedCache() if 'sfc_uuid' in request.json: vnffgd = cache.get(request.json['sfc_uuid']) if not vnffgd: return jsonify({'status': ERROR, 'reason': 'SFC UUID not found!'}) database = DatabaseConnection() origin = request.json['origin'] topology_template = vnffgd['vnffgd']['template']['vnffgd']['topology_template'] # sfp = service function path sfp_cps = topology_template['groups']['VNFFG1']['properties']['connection_point'] sfp_vnfs = topology_template['groups']['VNFFG1']['properties']['constituent_vnfs'] # network_src_port_id is a requirement for Tacker NFVO criteria = topology_template['node_templates']['Forwarding_path1'] \ ['properties']['policy']['criteria'] _, catalog = database.list_catalog(vnfd_name=sfp_vnfs[0]) sfp_first_pkg_id = catalog[0]['_id'] sfp_first_vnf_cps = list_vnf_pkg_cps(sfp_first_pkg_id, internal=True) if origin == INTERNAL: vnf_id = request.json['vnf_id'] resp, data = database.list_vnf_instances(vnf_id=vnf_id) if resp != OK: return jsonify({'status': resp, 'reason': data}) # Only VNFs instantiated by this framework can be used as origin, # as we need get information of its CP on VNF Packages if not data: return jsonify({'status': ERROR, 'reason': 'The chosen VNF was not instantiated by Holistic-Composer!'}) vnf_pkg_id = data[0]['vnf_pkg_id'] vnf_pkg_cps = list_vnf_pkg_cps(vnf_pkg_id, internal=True) # Leave just the CPs that are in the same subnet of the first VNF CP_in of the SFC cps = vnf_pkg_cps.keys() cps = list(cps) for cp in cps: if vnf_pkg_cps[cp]['network_name'] != sfp_first_vnf_cps[sfp_cps[0]]['network_name']: vnf_pkg_cps.pop(cp) # resource means the CP_out if 'resource' not in request.json: # Selects the suitable CP_out automatically if not vnf_pkg_cps: return jsonify({'status': ERROR, 'reason': 'No suitable CP on this VNF!'}) if len(vnf_pkg_cps) == 1: cp_name = list(vnf_pkg_cps.keys())[0] else: return jsonify({ 'status': OPTIONS, 'cp_list': vnf_pkg_cps }) else: cp_name = request.json['resource'] if cp_name not in vnf_pkg_cps: return jsonify({'status': ERROR, 'reason': 'Invalid CP!'}) response = get_vnf_nfvo_resource_id(vnf_id, cp_name) if response['status'] != OK: return jsonify(response) criteria.append({'network_src_port_id': response['resource_id']}) elif origin == EXTERNAL: resp, data = tacker_agent.get_fip_router_interface_id( sfp_first_vnf_cps[sfp_cps[0]]['network_name']) if resp == OK: criteria.append({'network_src_port_id': data}) else: return jsonify({ 'status': resp, 'reason': data }) else: return jsonify({ 'status': ERROR, 'reason': 'Error 500: Network traffic source to SFP should be INTERNAL or EXTERNAL.' }) # debug logger.debug('VNFFGD Template UUID: %s\n%s', request.json['sfc_uuid'], json.dumps(vnffgd, indent=4, sort_keys=True)) cache.set(request.json['sfc_uuid'], vnffgd) return jsonify({'status': OK}) def acl_criteria_parser(acl): """Parses all ACL criteria according of each NFVO requirements. It parses from strings to ints all ACL criteria to match the NFVO requirements. Currently just Tacker requirements are implemented. 
:param acl: a dict with the acl criteria :return: OK and data if success, or ERROR and its reason if not """ # TODO: this function should be moved to the tacker_agent with open('tacker_nfv_defs.yaml', 'r') as defs_file: acl_defs = defs_file.read() acl_defs = yaml.load(acl_defs) acl_types = acl_defs['data_types']['tosca.nfv.datatypes.aclType']['properties'] # TODO: also consider implementing validation constraints: in_range [...] from tacker_nfv_defs.yaml for item in acl: k = list(item.keys()) k = k[0] if k not in acl_types: return ERROR, 'Invalid ACL criteria "%s"!' % k if acl_types[k]['type'] == 'integer': item[k] = int(item[k]) return OK, acl @app.route('/sfc/acl', methods=['POST']) def include_sfc_acl(): """Includes ACL criteria in VNFFGD JSON arguments are: - sfc_uuid: the unique identifier of the SFC being composed - acl: a dict containing the acl criteria to be added into the vnffgd template :return: OK if success, or ERROR and its reason if not """ vnffgd = None cache = MemcachedCache() if 'sfc_uuid' in request.json: vnffgd = cache.get(request.json['sfc_uuid']) if not vnffgd: return jsonify({'status': ERROR, 'reason': 'SFC UUID not found!'}) acl = request.json['acl'] topology_template = vnffgd['vnffgd']['template']['vnffgd']['topology_template'] criteria = topology_template['node_templates']['Forwarding_path1'] \ ['properties']['policy']['criteria'] res, acl = acl_criteria_parser(acl) if res != OK: return jsonify({'status': ERROR, 'reason': acl}) for rule in acl: criteria.append(rule) #debug logger.debug('VNFFGD Template UUID: %s\n%s', request.json['sfc_uuid'], json.dumps(vnffgd, indent=4, sort_keys=True)) cache.set(request.json['sfc_uuid'], vnffgd) return jsonify({'status': OK}) def set_next_vnffgd_path_id(vnffgd): """Set up the next VNFFGD SFP id in the SFC being composed Retrieves the largest number of the SFP ID in the vnffgd catalog from the NFVO and sets the next one in the currently vnffgd being composed. 
:return: OK and the last path id, or ERROR and its reason """ result, data = tacker_agent.vnffgd_list() if result != OK: return result, data last_path_id = 0 for item in data: path_id = item['template']['vnffgd']['topology_template'] \ ['node_templates']['Forwarding_path1']['properties']['id'] if path_id > last_path_id: last_path_id = path_id vnffgd['vnffgd']['template']['vnffgd']['topology_template'] \ ['node_templates']['Forwarding_path1']['properties']['id'] = last_path_id + 1 return OK, last_path_id def sfc_rollback_actions(vnf_instance_ids, vnffgd_id=None): """Executes the rollback actions whether an error occurs while instantiating a VNFFG""" logger.info("Executing rollback actions...") error_message = '' if vnffgd_id: logger.info("Destroying VNFFGD %s", vnffgd_id) resp, data = tacker_agent.vnffgd_delete(vnffgd_id) if resp != OK: return data for vnf_id in vnf_instance_ids: rollback_data = destroy_vnf(vnf_id) # returns a flask.Response object rollback_data = json.loads(rollback_data.response[0]) # parsing from string do json if rollback_data['status'] != OK: error_message = ' '.join([error_message, rollback_data['reason']]) logger.error(rollback_data['reason']) logger.info('Rollback done!') return error_message @app.route('/sfc/start', methods=['POST']) def create_vnffg_v2(): """Sends and instantiates all VNFDs and VNFFGDs to the NFVO communication agent If an error occurs it also calls rollback actions JSON input parameters are: - sfc_uuid: the unique identifier of the composed SFC to be started :return: OK if all succeed, or ERROR and its reason """ vnffgd = None cache = MemcachedCache() if 'sfc_uuid' in request.json: vnffgd = cache.get(request.json['sfc_uuid']) if not vnffgd: return jsonify({'status': ERROR, 'reason': 'SFC UUID not found!'}) database = DatabaseConnection() vnf_instance_list = [] vnf_mapping = {} constituent_vnfs = vnffgd['vnffgd']['template']['vnffgd']['topology_template'] \ ['groups']['VNFFG1']['properties']['constituent_vnfs'] # configuring VNFFGD unique name vnffgd['vnffgd']['name'] = unique_id() # instantiating all VNFDs in VNFFGD for vnfd_name in constituent_vnfs: result, data = database.list_catalog(vnfd_name=vnfd_name) if result != OK: return jsonify({'status': result, 'reason': data}) vnf_data = instantiate_vnf(data[0]['_id']) # returns a flask.Response object # vnf_data = json.loads(vnf_data.response[0]) # parsing from string do json vnf_data = vnf_data.json # Rollback action if a given VNF fails on instantiating if vnf_data['status'] != OK: message = sfc_rollback_actions(vnf_instance_list) vnf_data['reason'] = ' '.join([vnf_data['reason'], message]) return jsonify(vnf_data) vnf_instance_list.append(vnf_data['vnf_id']) vnf_mapping[vnfd_name] = vnf_data['vnf_id'] # incrementing SFP path_id number in VNFFGD # TODO: consider put the set_next_vnffgd_path_id() in a critical region to avoid condition racing result, data = set_next_vnffgd_path_id(vnffgd) if result != OK: message = sfc_rollback_actions(vnf_instance_list) data = ' '.join([data, message]) return jsonify({'status': result, 'reason': data}) # create VNFFGD in NFVO resp, data = tacker_agent.vnffgd_create(vnffgd) # TODO: critical region until here # show the ultimate created VNFFGD logger.info('VNFFGD Template UUID: %s\n%s', request.json['sfc_uuid'], json.dumps(vnffgd, indent=4, sort_keys=True)) if resp != OK: # Rollback actions message = sfc_rollback_actions(vnf_instance_list) data = ' '.join([data, message]) logger.error(data) return jsonify({'status': resp, 'reason': data}) vnffgd_id = data 
logger.info("VNFFG descriptor created with id %s", vnffgd_id) # instantiate VNFFG resp, data = tacker_agent.vnffg_create(vnffgd_id, vnf_mapping, unique_id()) if resp != OK: # Rollback actions message = sfc_rollback_actions(vnf_instance_list, vnffgd_id) message = ' '.join([data, message]) return jsonify({'status': resp, 'reason': message}) vnffg_id = data logger.info("VNFFG %s instantiated successfully!", vnffg_id) resp, data = database.insert_sfc_instance(vnf_instance_list, vnffgd_id, vnffg_id) if resp != OK: # Rollback actions vnffg_resp, vnffg_data = tacker_agent.vnffg_delete(vnffg_id) if vnffg_resp != OK: return jsonify({'status': vnffg_resp, 'reason': ': '.join(['VNFFG', vnffg_data])}) message = sfc_rollback_actions(vnf_instance_list, vnffgd_id) message = ' '.join([data, message]) return jsonify({'status': resp, 'reason': message}) cache.delete(request.json['sfc_uuid']) return jsonify({'status': OK}) @app.route('/sfc/<vnffg_id>', methods=['DELETE']) def destroy_vnffg(vnffg_id): """Destroy the VNFFG and its VNFs This function destroys the VNFFG and its VNFFGDs, and also all the VNFs and its VNFDs that are specified in the VNFFG :param vnffg_id: the NFVO unique identifier of the VNFFG :return: OK if succeed, or ERROR and its reason if not """ database = DatabaseConnection() state, data = tacker_agent.vnffg_show(vnffg_id) if state != OK: return jsonify({'status': state, 'reason': data}) vnffgd_id = data['vnffgd_id'] vnf_mapping = data['vnf_mapping'] vnffg_vnfs = [] for vnf_id in vnf_mapping.values(): vnffg_vnfs.append(vnf_id) # destroying VNFFG resp, data = tacker_agent.vnffg_delete(vnffg_id) if state != OK: return jsonify({'status': resp, 'reason': data}) # remove SFC_Instance from database resp, sfc_instance = database.list_sfc_instances(vnffg_id=vnffg_id) if resp == OK: if len(sfc_instance) > 0: _id = sfc_instance[0]['_id'] database.remove_sfc_instance(_id) # TODO: How many time we should wait before remove the VNFFGD? time.sleep(2) # destroying VNFFGD resp, data = tacker_agent.vnffgd_delete(vnffgd_id) if state != OK: return jsonify({'status': resp, 'reason': data}) # destroying VNFs message = '' for vnf_id in vnffg_vnfs: vnf_data = destroy_vnf(vnf_id) # returns a flask.Response object vnf_data = json.loads(vnf_data.response[0]) # parsing from string do json if vnf_data['status'] != OK: message = ' '.join([message, '\nVNF id %s: ' % vnf_id, vnf_data['reason']]) if message: return jsonify({'status': ERROR, 'reason': message}) return jsonify({'status': OK}) @app.route('/sfc', methods=['GET']) def list_vnffg(): resp, data = tacker_agent.vnffg_list() if resp != OK: return jsonify({'status': resp, 'reason': data}) return jsonify({'status': OK, 'vnffgs': data}) if __name__ == '__main__': # app.run(threaded=True) # app.run(threaded=False) app.run(processes=4, threaded=False)
#!/usr/bin/env python """Stats server implementation.""" from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals import collections import errno import json import logging import socket import threading from future.builtins import range from future.moves.urllib import parse as urlparse from future.utils import iteritems from http import server as http_server import prometheus_client from grr_response_core import config from grr_response_core.lib import registry from grr_response_core.lib import utils from grr_response_core.lib.rdfvalues import stats as rdf_stats from grr_response_core.stats import stats_collector_instance from grr_response_server import base_stats_server def _JSONMetricValue(metric_info, value): if metric_info.metric_type == rdf_stats.MetricMetadata.MetricType.EVENT: return dict( sum=value.sum, counter=value.count, bins_heights=collections.OrderedDict(value.bins_heights)) else: return value def BuildVarzJsonString(): """Builds Varz JSON string from all stats metrics.""" results = {} for name, metric_info in iteritems( stats_collector_instance.Get().GetAllMetricsMetadata()): info_dict = dict(metric_type=metric_info.metric_type.name) if metric_info.value_type: info_dict["value_type"] = metric_info.value_type.name if metric_info.docstring: info_dict["docstring"] = metric_info.docstring if metric_info.units: info_dict["units"] = metric_info.units.name if metric_info.fields_defs: info_dict["fields_defs"] = [] for field_def in metric_info.fields_defs: info_dict["fields_defs"].append((field_def.field_name, utils.SmartStr(field_def.field_type))) value = {} all_fields = stats_collector_instance.Get().GetMetricFields(name) for f in all_fields: joined_fields = ":".join(utils.SmartStr(fname) for fname in f) value[joined_fields] = _JSONMetricValue( metric_info, stats_collector_instance.Get().GetMetricValue(name, fields=f)) else: value = _JSONMetricValue( metric_info, stats_collector_instance.Get().GetMetricValue(name)) results[name] = dict(info=info_dict, value=value) encoder = json.JSONEncoder() return encoder.encode(results) class StatsServerHandler(http_server.BaseHTTPRequestHandler): """Default stats server implementation.""" def do_GET(self): # pylint: disable=g-bad-name if self.path == "/prometheus_metrics": # TODO: This code is copied from # prometheus_client.MetricsHandler. Because MetricsHandler is an old-style # class and dispatching to different BaseHTTPRequestHandlers is # surprisingly hard, we copied the code instead of calling it. After a # deprecation period, the /varz route will be removed and # StatsServerHandler can be replaced by prometheus_client.MetricsHandler. 
pc_registry = prometheus_client.REGISTRY params = urlparse.parse_qs(urlparse.urlparse(self.path).query) encoder, content_type = prometheus_client.exposition.choose_encoder( self.headers.get("Accept")) if "name[]" in params: pc_registry = pc_registry.restricted_registry(params["name[]"]) try: output = encoder(pc_registry) except: self.send_error(500, "error generating metric output") raise self.send_response(200) self.send_header("Content-Type", content_type) self.end_headers() self.wfile.write(output) elif self.path == "/varz": self.send_response(200) self.send_header("Content-type", "application/json") self.end_headers() self.wfile.write(BuildVarzJsonString()) elif self.path == "/healthz": self.send_response(200) else: self.send_error(404, "Not found") class StatsServer(base_stats_server.BaseStatsServer): """A statistics server that exposes a minimal, custom /varz route.""" def __init__(self, port): """Instantiates a new StatsServer. Args: port: The TCP port that the server should listen to. """ super(StatsServer, self).__init__(port) self._http_server = None self._server_thread = None def Start(self): """Start HTTPServer.""" try: self._http_server = http_server.HTTPServer(("", self.port), StatsServerHandler) except socket.error as e: if e.errno == errno.EADDRINUSE: raise base_stats_server.PortInUseError(self.port) else: raise self._server_thread = threading.Thread( target=self._http_server.serve_forever) self._server_thread.daemon = True self._server_thread.start() def Stop(self): """Stops serving statistics.""" self._http_server.shutdown() self._server_thread.join() class StatsServerInit(registry.InitHook): """Starts up a varz server after everything is registered.""" def RunOnce(self): """Main method of this registry hook. StatsServer implementation may be overriden. If there's a "stats_server" module present in grr/local directory then grr.local.stats_server.StatsServer implementation will be used instead of a default one. """ # Figure out which port to use. port = config.CONFIG["Monitoring.http_port"] if not port: logging.info("Monitoring server disabled.") return # TODO(user): Implement __contains__ for GrrConfigManager. max_port = config.CONFIG.Get("Monitoring.http_port_max", None) if max_port is None: # Use the same number of available ports as the adminui is using. If we # have 10 available for adminui we will need 10 for the stats server. adminui_max_port = config.CONFIG.Get("AdminUI.port_max", config.CONFIG["AdminUI.port"]) max_port = port + adminui_max_port - config.CONFIG["AdminUI.port"] try: # pylint: disable=g-import-not-at-top from grr_response_server.local import stats_server # pylint: enable=g-import-not-at-top server_cls = stats_server.StatsServer logging.debug("Using local StatsServer") except ImportError: logging.debug("Using default StatsServer") server_cls = StatsServer for port in range(port, max_port + 1): try: logging.info("Starting monitoring server on port %d.", port) server_obj = server_cls(port) server_obj.Start() return except base_stats_server.PortInUseError as e: if e.port < max_port: logging.info(e.message) continue raise
import unittest
from unittest import mock

import quibble.mediawiki.maintenance


class TestMediawikiMaintenance(unittest.TestCase):

    @mock.patch.dict('os.environ', {'BAR': 'foo'}, clear=True)
    @mock.patch('subprocess.Popen')
    def test_install_php_uses_os_environment(self, mock_popen):
        mock_popen.return_value.returncode = 0
        quibble.mediawiki.maintenance.install([])
        (args, kwargs) = mock_popen.call_args
        env = kwargs.get('env', {})
        self.assertIn('BAR', env)
        self.assertEqual('foo', env['BAR'])

    @mock.patch.dict('os.environ', {'LANG': 'C'}, clear=True)
    @mock.patch('subprocess.Popen')
    def test_install_php_enforces_LANG(self, mock_popen):
        mock_popen.return_value.returncode = 0
        quibble.mediawiki.maintenance.install([])
        (args, kwargs) = mock_popen.call_args
        env = kwargs.get('env', {})
        self.assertEqual({'LANG': 'C.UTF-8'}, env)

    @mock.patch.dict('os.environ', {'BAR': 'foo'}, clear=True)
    @mock.patch('subprocess.Popen')
    def test_update_php_uses_os_environment(self, mock_popen):
        mock_popen.return_value.returncode = 0
        quibble.mediawiki.maintenance.update([])
        (args, kwargs) = mock_popen.call_args
        env = kwargs.get('env', {})
        self.assertEqual({'BAR': 'foo'}, env)

    @mock.patch.dict('os.environ', clear=True)
    @mock.patch('subprocess.Popen')
    def test_update_php_default_to_no_mw_install_path(self, mock_popen):
        mock_popen.return_value.returncode = 0
        quibble.mediawiki.maintenance.update([])
        (args, kwargs) = mock_popen.call_args
        env = kwargs.get('env', {})
        self.assertNotIn('MW_INSTALL_PATH', env)

    @mock.patch.dict('os.environ', clear=True)
    @mock.patch('subprocess.Popen')
    def test_update_php_sets_mw_install_path(self, mock_popen):
        mock_popen.return_value.returncode = 0
        quibble.mediawiki.maintenance.update([], mwdir='test/sources')
        (args, kwargs) = mock_popen.call_args
        env = kwargs.get('env', {})
        self.assertIn('MW_INSTALL_PATH', env)
        self.assertEqual(env['MW_INSTALL_PATH'], 'test/sources')

    @mock.patch('subprocess.Popen')
    def test_update_php_raises_exception_on_bad_exit_code(self, mock_popen):
        mock_popen.return_value.returncode = 42
        with self.assertRaisesRegex(
            Exception, 'Update failed with exit code: 42'
        ):
            quibble.mediawiki.maintenance.update([], mwdir='test/sources')

    @mock.patch('subprocess.Popen')
    def test_rebuildlocalisationcache_default_lang_parameter(self, mock_popen):
        mock_popen.return_value.returncode = 0
        quibble.mediawiki.maintenance.rebuildLocalisationCache()
        (args, kwargs) = mock_popen.call_args
        params = args[0][2:]
        self.assertEqual(['--lang', 'en'], params)

    @mock.patch('subprocess.Popen')
    def test_rebuildlocalisationcache_lang_parameter(self, mock_popen):
        mock_popen.return_value.returncode = 0
        quibble.mediawiki.maintenance.rebuildLocalisationCache(
            lang=['fr', 'zh']
        )
        (args, kwargs) = mock_popen.call_args
        params = args[0][2:]
        self.assertEqual(['--lang', 'fr,zh'], params)

    @mock.patch('subprocess.Popen')
    def test_rebuildlocalisationcache_raises_exception_on_bad_exit_code(
        self, mock_popen
    ):
        mock_popen.return_value.returncode = 43
        with self.assertRaisesRegex(
            Exception, 'rebuildLocalisationCache failed with exit code: 43'
        ):
            quibble.mediawiki.maintenance.rebuildLocalisationCache()
from abc import ABC, abstractmethod import treeclasses import random import copy from sklearn.externals import joblib from sklearn.neural_network import MLPRegressor import pandas import mtrain class Player(ABC): def __init__(self, player_num): self.player_num = player_num super().__init__() @abstractmethod def play_train(self, dominos, start_value): pass @abstractmethod def play_forced_double(self, dominos, start_value): pass @abstractmethod def play_normally(self, dominos, trains, round_number, turn_number): pass def can_play_on_single(self, dominos, targets): """ Checks to find all possible single domino plays that can play on the available targets """ potential_plays = [] for target in targets: for domino in dominos: if domino[0] == domino[1]: continue else: if domino[0] == target[1]: potential_plays.append((target, (domino[0], domino[1]))) if domino[1] == target[1]: potential_plays.append((target, (domino[1], domino[0]))) return potential_plays def can_play_on_double_good(self, dominos, targets): """ Checks to find all possible good double pair dominos that can play on the available targets """ potential_plays = [] for target in targets: for domino1 in dominos: if domino1[0] == domino1[1] and domino1[0] == target[1]: for domino2 in dominos: if domino1[0] == domino2[0] and domino1[1] == domino2[1]: continue elif domino1[0] == domino2[0]: potential_plays.append((target, domino1, (domino2[0], domino2[1]))) elif domino1[0] == domino2[1]: potential_plays.append((target, domino1, (domino2[1], domino2[0]))) else: continue else: continue return potential_plays def can_play_on_double_bad(self, dominos, targets): """ Checks to find all possible bad double pair dominos that can play on the available targets """ potential_plays = [] for target in targets: for domino in dominos: if domino[0] == domino[1] and domino[0] == target[1]: potential_plays.append((target, domino)) else: continue return potential_plays def find_longest_path(self, node, touched): children = [] for con in node.connections: if not con.weight in touched: children.append(con) touched.append(node.weight) if len(children) == 0: return [node.weight] weight = 0 path = [] for child in children: pt = self.find_longest_path(child, touched) if sum(pt) > weight: weight = sum(pt) path = pt return [node.weight] + path def build_network(self, dominos, value, touched): next_tiles = [] for dom in dominos: if dom[0] == value: next_tiles.append((dom[0], dom[1])) elif dom[1] == value: next_tiles.append((dom[1], dom[0])) touched.append(value) nodes = [] for tile in next_tiles: if not tile[1] in touched: nodes.append(self.build_network(dominos, tile[1], touched)) elif tile[1] == value: count = 0 for t in touched: if t == tile[1]: count += 1 if count == 2: continue else: nodes.append(self.build_network(dominos, tile[1], touched)) nd = treeclasses.WeightedNode(str(value), value) for node in nodes: nd.add_connection(node=node) return nd class GreedyPlayer(Player): def play_train(self, dominos, start_value): """ Returns a list of the longest possible series of dominos to play """ nd = self.build_network(dominos, start_value, []) path = self.find_longest_path(nd, []) train = [] for x in range(0, len(path) - 1): train.append((path[x], path[x+1])) return train def play_forced_double(self, dominos, train): """ Returns a list of the longest possible series of dominos to play """ needed = train.get_last() plays = self.can_play_on_single(dominos, [needed]) if len(plays) == 0: return [], [] else: scores = [] play_data = [] for play in plays: 
scores.append(play[1][0] + play[1][1]) play_data.append(play[1]) max_score = -1 index = -1 for play_num in range(0, len(plays)): if scores[play_num] > max_score: max_score = scores[play_num] index = play_num return [plays[index][1]], play_data def play_normally(self, dominos, trains, round_number, turn_number): """ Returns a list of the longest possible series of dominos to play """ targets = [] for x in range(0, len(trains)): train = trains[x] if train.marker_up: if train.get_last() == (-1, -1): targets.append((round_number, round_number)) else: targets.append(train.get_last()) targets.append(trains[self.player_num].get_last()) scores = [[],[],[]] potential_plays = [[],[],[]] potential_plays[0] = self.can_play_on_single(dominos, targets) potential_plays[1] = self.can_play_on_double_good(dominos, targets) potential_plays[2] = self.can_play_on_double_bad(dominos, targets) for play in potential_plays[0]: scores[0].append(play[1][0] + play[1][1]) for play in potential_plays[1]: scores[1].append(play[1][0] + play[1][1] + play[2][0] + play[2][1]) for play in potential_plays[2]: scores[2].append(play[1][0] + play[1][1]) max_score = -1 x_index = -1 y_index = -1 for x in range(0, 3): for y in range(0, len(scores[x])): if scores[x][y] > max_score: max_score = scores[x][y] x_index = x y_index = y if x_index == -1 or y_index == -1: return -1, [], [] play = potential_plays[x_index][y_index] target = play[0] t_num = -1 plays = [] for x in range(1, len(play)): plays.append(play[x]) for x in range(0, len(trains)): train = trains[x] if train.marker_up or x == self.player_num: if train.get_last() == (-1, -1) and target == (round_number, round_number): t_num = x elif target == train.get_last(): t_num = x return t_num, plays, potential_plays class RandomPlayer(Player): def play_train(self, dominos, start_value): """ Returns a list of the longest possible series of dominos to play """ nd = self.build_network(dominos, start_value, []) path = self.find_longest_path(nd, []) train = [] for x in range(0, len(path) - 1): train.append((path[x], path[x+1])) return train def play_forced_double(self, dominos, train): """ Returns a list of the longest possible series of dominos to play """ needed = train.get_last() plays = self.can_play_on_single(dominos, [needed]) if len(plays) == 0: return [], [] else: play_data = [] for play in plays: play_data.append(play[1]) index = random.randrange(0, len(plays)) return [plays[index][1]], play_data def play_normally(self, dominos, trains, round_number, turn_number): """ Returns a list of the longest possible series of dominos to play """ targets = [] for x in range(0, len(trains)): train = trains[x] if train.marker_up: if train.get_last() == (-1, -1): targets.append((round_number, round_number)) else: targets.append(train.get_last()) targets.append(trains[self.player_num].get_last()) potential_plays = [[],[],[]] potential_plays[0] = self.can_play_on_single(dominos, targets) potential_plays[1] = self.can_play_on_double_good(dominos, targets) potential_plays[2] = self.can_play_on_double_bad(dominos, targets) x_index = -1 y_index = -1 for x in [1, 0, 2]: if len(potential_plays[x]) == 0: continue else: x_index = x y_index = random.randrange(0, len(potential_plays[x])) if x_index == -1 or y_index == -1: return -1, [], [] play = potential_plays[x_index][y_index] target = play[0] t_num = -1 plays = [] for x in range(1, len(play)): plays.append(play[x]) for x in range(0, len(trains)): train = trains[x] if train.marker_up or x == self.player_num: if train.get_last() == (-1, -1) and target 
== (round_number, round_number): t_num = x elif target == train.get_last(): t_num = x return t_num, plays, potential_plays class ProbabilityPlayer(Player): def __init__(self, player_num, domino_size): super().__init__(player_num) self.domino_size = domino_size def get_unknown_dominos(self, trains, dominos, round_number): known_dominos = copy.deepcopy(dominos) known_dominos += [(round_number, round_number)] for train in trains: known_dominos += train.train_list unknown_dominos = [] for x in range(0, self.domino_size + 1): for y in range(x, self.domino_size + 1): if not (x,y) in known_dominos and not (y,x) in known_dominos: unknown_dominos.append((x,y)) return unknown_dominos def playability_probabilities(self, unknown_dominos): play_probs = [] for value in range(0, self.domino_size+1): left_over_doms = [] for domino in unknown_dominos: if domino[0] == value or domino[1] == value: left_over_doms.append(domino) prob = float(len(left_over_doms)) / float(len(unknown_dominos)) play_probs.append(prob) return play_probs def play_train(self, dominos, start_value): """ Returns a list of the longest possible series of dominos to play """ nd = self.build_network(dominos, start_value, []) path = self.find_longest_path(nd, []) train = [] for x in range(0, len(path) - 1): train.append((path[x], path[x+1])) return train def play_forced_double(self, dominos, train): """ Returns a list of the longest possible series of dominos to play """ needed = train.get_last() plays = self.can_play_on_single(dominos, [needed]) if len(plays) == 0: return [], [] else: scores = [] play_data = [] for play in plays: scores.append(play[1][0] + play[1][1]) play_data.append(play[1]) max_score = -1 index = -1 for play_num in range(0, len(plays)): if scores[play_num] > max_score: max_score = scores[play_num] index = play_num return [plays[index][1]], play_data def play_normally(self, dominos, trains, round_number, turn_number): """ Returns a list of the longest possible series of dominos to play """ targets = [] for x in range(0, len(trains)): train = trains[x] if train.marker_up: if train.get_last() == (-1, -1): targets.append((round_number, round_number)) else: targets.append(train.get_last()) targets.append(trains[self.player_num].get_last()) scores = [[],[],[]] probs = [[],[],[]] play_off_score = [[],[],[]] potential_plays = [[],[],[]] potential_plays[0] = self.can_play_on_single(dominos, targets) potential_plays[1] = self.can_play_on_double_good(dominos, targets) potential_plays[2] = self.can_play_on_double_bad(dominos, targets) unknown_dominos = self.get_unknown_dominos(trains, dominos, round_number) playability_probabilities = self.playability_probabilities(unknown_dominos) for play in potential_plays[0]: scores[0].append(play[1][0] + play[1][1]) try: probs[0].append(playability_probabilities[play[1][1]]) except TypeError: print("Play: " + str(play)) print("Targets: " + str(targets)) print("Dominos: " + str(dominos)) print("Dominos size: " + str(len(dominos))) print("Playernum: " + str(self.player_num)) raise TypeError("Issue") doms = [dom for dom in dominos if not dom == play[1]] targs = [tar for tar in targets if not tar == play[0]] targs += [play[1]] if len(self.can_play_on_single(doms, targs)) > 0 \ or len(self.can_play_on_double_good(doms, targs)) > 0: play_off_score[0].append(1) else: play_off_score[0].append(0) for play in potential_plays[1]: scores[1].append(play[1][0] + play[1][1] + play[2][0] + play[2][1]) probs[1].append(playability_probabilities[play[2][1]]) doms = [dom for dom in dominos if not dom == play[1] 
and not dom == play[2]] targs = [tar for tar in targets if not tar == play[0]] targs += [play[2]] if len(self.can_play_on_single(doms, targs)) > 0 \ or len(self.can_play_on_double_good(doms, targs)) > 0: play_off_score[1].append(1) else: play_off_score[1].append(0) for play in potential_plays[2]: scores[2].append(play[1][0] + play[1][1]) probs[2].append(playability_probabilities[play[1][1]]) doms = [dom for dom in dominos if not dom == play[1]] targs = [tar for tar in targets if not tar == play[0]] targs += [play[1]] if len(self.can_play_on_single(doms, targs)) > 0 \ or len(self.can_play_on_double_good(doms, targs)) > 0: play_off_score[2].append(1) else: play_off_score[2].append(0) combined_score = [[0 for x in scores[0]],[0 for x in scores[1]],[0 for x in scores[2]]] for x in range(0, 3): for y in range(0, len(scores[x])): combined_score[x][y] = .5 * scores[x][y] / (self.domino_size * 2) combined_score[x][y] += .5 * (1 - probs[x][y]) * play_off_score[x][y] max_net_score = -1 x_index = -1 y_index = -1 for x in range(0, 3): for y in range(0, len(combined_score[x])): if combined_score[x][y] > max_net_score: max_net_score = combined_score[x][y] x_index = x y_index = y if x_index == -1 or y_index == -1: return -1, [], [] play = potential_plays[x_index][y_index] target = play[0] t_num = -1 plays = [] for x in range(1, len(play)): plays.append(play[x]) for x in range(0, len(trains)): train = trains[x] if train.marker_up or x == self.player_num: if train.get_last() == (-1, -1) and target == (round_number, round_number): t_num = x elif target == train.get_last(): t_num = x return t_num, plays, potential_plays class NeuralPlayer(Player): def __init__(self, player_num, domino_size, filename, num_players): super().__init__(player_num) self.num_players = num_players self.domino_size = domino_size self.network = joblib.load(filename + ".pkl") self.features = ["round_number", "turn_number", "t_num"] self.features += ["play", "hand", "unknown", "potential_plays"] for num in range(0, num_players + 1): self.features.append("train_" + str(num)) self.features.append("marker_" + str(num)) def predict_scores_of_plays(self, play_data): if play_data.shape[0] < 1: return [] scores = self.network.predict(play_data) return scores def get_unknown_dominos(self, trains, dominos, round_number): known_dominos = copy.deepcopy(dominos) known_dominos += [(round_number, round_number)] for train in trains: known_dominos += train.train_list unknown_dominos = [] for x in range(0, self.domino_size + 1): for y in range(x, self.domino_size + 1): if not (x,y) in known_dominos and not (y,x) in known_dominos: unknown_dominos.append((x,y)) return unknown_dominos def build_dataframe(self, round_number, turn_number, trains, dominos, potential_plays): first_frame = pandas.DataFrame(columns=self.features) for x in range(0, len(potential_plays)): first_frame.loc[x, "round_number"] = round_number first_frame.loc[x, "turn_number"] = turn_number plays = [potential_plays[y][1] for y in range(0, len(potential_plays))] first_frame.loc[x, "potential_plays"] = mtrain.create_one_hot(self.domino_size, plays) unknown = self.get_unknown_dominos(trains, dominos, round_number) first_frame.loc[x, "unknown"] = mtrain.create_one_hot(self.domino_size, unknown) first_frame.loc[x, "hand"] = mtrain.create_one_hot(self.domino_size, dominos) for train_num in range(0, len(trains)): first_frame.loc[x, "train_" + str(train_num)] = mtrain.create_one_hot(self.domino_size, trains[train_num].train_list) if trains[train_num].marker_up: first_frame.loc[x, "marker_" + 
str(train_num)] = 1 else: first_frame.loc[x, "marker_" + str(train_num)] = 0 if trains[train_num].marker_up or train_num == self.player_num: target = trains[train_num].get_last() if target == (-1, -1) and potential_plays[x][0] == (round_number, round_number): first_frame.loc[x, "t_num"] = train_num elif target == potential_plays[x][0]: first_frame.loc[x, "t_num"] = train_num first_frame.loc[x, "play"] = mtrain.create_one_hot(self.domino_size,[potential_plays[x][1]]) true_data = pandas.DataFrame() true_data["round_number"] = first_frame["round_number"] true_data["turn_number"] = first_frame["turn_number"] true_data["t_num"] = first_frame["t_num"] domino_count = int(self.domino_size + 1 + ((self.domino_size + 1) * self.domino_size) / 2.0) for current in range(0, domino_count): true_data["play_" + str(current)] = [x[current] for x in first_frame["play"]] true_data["hand_" + str(current)] = [x[current] for x in first_frame["hand"]] true_data["unknown_" + str(current)] = [x[current] for x in first_frame["unknown"]] true_data["potential_plays_" + str(current)] = [x[current] for x in first_frame["potential_plays"]] for num in range(0, self.num_players + 1): true_data["marker_" + str(num)] = first_frame["marker_" + str(num)] for current in range(0, domino_count): true_data["train_" + str(num) + "_" + str(current)] = [x[current] for x in first_frame["train_" + str(num)]] return true_data def play_train(self, dominos, start_value): """ Returns a list of the longest possible series of dominos to play """ nd = self.build_network(dominos, start_value, []) path = self.find_longest_path(nd, []) train = [] for x in range(0, len(path) - 1): train.append((path[x], path[x+1])) return train def play_forced_double(self, dominos, train): """ Returns a list of the longest possible series of dominos to play """ needed = train.get_last() plays = self.can_play_on_single(dominos, [needed]) if len(plays) == 0: return [], [] else: scores = [] play_data = [] for play in plays: scores.append(play[1][0] + play[1][1]) play_data.append(play[1]) max_score = -1 index = -1 for play_num in range(0, len(plays)): if scores[play_num] > max_score: max_score = scores[play_num] index = play_num return [plays[index][1]], play_data def play_normally(self, dominos, trains, round_number, turn_number): """ Returns a list of the longest possible series of dominos to play """ targets = [] for x in range(0, len(trains)): train = trains[x] if train.marker_up or x == self.player_num: if train.get_last() == (-1, -1): targets.append((round_number, round_number)) else: targets.append(train.get_last()) potential_plays = [[],[],[]] potential_plays[0] = self.can_play_on_single(dominos, targets) potential_plays[1] = self.can_play_on_double_good(dominos, targets) potential_plays[2] = self.can_play_on_double_bad(dominos, targets) all_plays = copy.deepcopy(potential_plays[0]) all_plays += copy.deepcopy(potential_plays[1]) all_plays += copy.deepcopy(potential_plays[2]) formatted_plays = [] for play in all_plays: formatted_plays.append((play[0], play[-1])) try: data = self.build_dataframe(round_number, turn_number, trains, dominos, formatted_plays) scores = self.predict_scores_of_plays(data) except ValueError: print("Round: " + str(round_number)) print("Turn: " + str(round_number)) for tag in targets: print("Targets: " + str(tag)) for dom in dominos: print("Dominos: " + str(dom)) for pl in formatted_plays: print("Plays: " + str(pl)) print(data.head()) raise ValueError min_score = 1000 index = -1 for score_num in range(0, len(scores)): if 
scores[score_num] < min_score: index = score_num min_score = scores[score_num] if index == -1: return -1, [], [] play = all_plays[index] target = play[0] t_num = -1 plays = [] for x in range(1, len(play)): plays.append(play[x]) for x in range(0, len(trains)): train = trains[x] if train.marker_up or x == self.player_num: if train.get_last() == (-1, -1) and target == (round_number, round_number): t_num = x elif target == train.get_last(): t_num = x return t_num, plays, potential_plays
# -*- coding: UTF-8 -*-
from tkinter import *


class MainWindow:
    def __init__(self):
        self.frame = Tk()

        self.label_search = Label(self.frame, text="Input a word:")
        self.text_search = Text(self.frame, height="1", width=30)
        self.label_Result = Label(self.frame, text="Result from iciba:")
        self.text_Result = Text(self.frame, height="1", width=30)

        self.label_search.grid(row=0, column=0)
        self.text_search.grid(row=1, column=0)
        self.label_Result.grid(row=2, column=0)
        self.text_Result.grid(row=3, column=0)

        self.frame.mainloop()


frame = MainWindow()
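# A minimal sketch (not part of the original file) showing one way the search box above
# could be wired to the result box: the original MainWindow only lays out the widgets,
# never reads the query, and calls mainloop() inside __init__, so a Button/command would
# have to be added before that call. Only standard tkinter calls are used here
# (Text.get/delete/insert, Button); the uppercase "translation" is a placeholder for a
# real dictionary lookup (e.g. querying iciba).
from tkinter import END


def lookup_word(text_search, text_result):
    word = text_search.get("1.0", END).strip()  # read the query from the input box
    text_result.delete("1.0", END)              # clear any previous result
    text_result.insert(END, word.upper())       # placeholder for a real lookup result

# Example wiring, assuming the widgets exist as in MainWindow (Button comes from the
# star import above):
#   Button(self.frame, text="Search",
#          command=lambda: lookup_word(self.text_search, self.text_Result)).grid(row=4, column=0)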
import sys
import argparse

import web3

import autobahn
from autobahn import xbr


def main(accounts):
    print('\nTest accounts - ETH/XBR balances and XBR data markets:\n')
    for acct in accounts:
        balance_eth = w3.eth.getBalance(acct)
        balance_xbr = xbr.xbrtoken.functions.balanceOf(acct).call()
        count_markets = xbr.xbrnetwork.functions.countMarketsByOwner(acct).call()
        print('acct {}: {:>28} ETH, {:>28} XBR, {:>4} markets'.format(
            acct, balance_eth, balance_xbr, count_markets))
    print()


if __name__ == '__main__':
    if not xbr.HAS_XBR:
        raise RuntimeError('fatal: missing xbr support in autobahn (install using "pip install autobahn [xbr]")')
    else:
        print('using autobahn v{}, web3.py v{}'.format(autobahn.__version__, web3.__version__))

    parser = argparse.ArgumentParser()
    parser.add_argument('--gateway',
                        dest='gateway',
                        type=str,
                        default=None,
                        help='Ethereum HTTP gateway URL or None for auto-select (default: -, means let web3 auto-select).')
    args = parser.parse_args()

    if args.gateway:
        w3 = web3.Web3(web3.Web3.HTTPProvider(args.gateway))
    else:
        # using automatic provider detection:
        from web3.auto import w3

    # check we are connected, and check network ID
    if not w3.isConnected():
        print('could not connect to Web3/Ethereum at: {}'.format(args.gateway or 'auto'))
        sys.exit(1)
    else:
        print('connected via provider "{}"'.format(args.gateway or 'auto'))

    # set new provider on XBR library
    xbr.setProvider(w3)

    # now enter main ..
    main(w3.eth.accounts)
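# Side note (not part of the original script): w3.eth.getBalance() returns wei, so the
# ETH column above prints a raw integer. With the same legacy camelCase web3.py API the
# script already relies on, the value can be made human readable as sketched below;
# format_eth_balance is a hypothetical helper, and whether the XBR token also uses
# 18 decimals is not assumed here, so that column is left untouched.
def format_eth_balance(w3, acct):
    balance_wei = w3.eth.getBalance(acct)       # balance in wei
    return w3.fromWei(balance_wei, 'ether')     # Decimal ETH instead of raw wei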
from aiosnow import select
from aiosnow.models.table.declared import IncidentModel as Incident


async def main(client):
    async with Incident(client, table_name="incident") as inc:
        query = select().order_asc(inc.number)
        async for _, record in inc.stream(query, limit=500, page_size=50):
            print("{number} ({sys_id}): {short_description}".format(**record))
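# Editor's note -- hedged sketch of how the main() coroutine above might be driven. The Client
# constructor arguments (instance address plus a basic_auth tuple) are an assumption; check the
# aiosnow documentation for the exact signature. Instance and credentials are placeholders.
import asyncio

import aiosnow


def run():
    client = aiosnow.Client("example.service-now.com", basic_auth=("user", "password"))
    asyncio.run(main(client))  # main() as defined above


if __name__ == "__main__":
    run()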
# Copyright (c) 2016-2018 Koninklijke Philips N.V. All rights reserved. A # copyright license for redistribution and use in source and binary forms, # with or without modification, is hereby granted for non-commercial, # experimental and research purposes, provided that the following conditions # are met: # - Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimers. # - Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimers in the # documentation and/or other materials provided with the distribution. If # you wish to use this software commercially, kindly contact # info.licensing@philips.com to obtain a commercial license. # # This license extends only to copyright and does not include or grant any # patent license or other license whatsoever. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. import sys from pysnark.lib.ggh_plain import ggh_hash def readhexbits(fl): while True: ch = fl.read(1) if ch=='': return if ch==' ' or ch=='\n' or ch=='\r': continue val = (ord(ch)-ord('A')+10 if ord(ch)>=ord('A') else ord(ch)-ord('0')) yield 1 if val&8!=0 else 0 yield 1 if val&4!=0 else 0 yield 1 if val&2!=0 else 0 yield 1 if val&1!=0 else 0 def bitstohex(str): try: while True: bit1 = str.next() bit2 = str.next() bit3 = str.next() bit4 = str.next() val = bit1*8 + bit2*4 + bit3*2 + bit4 yield chr(val-10+ord('A') if val >= 10 else val+ord('0')) except StopIteration: pass bits = readhexbits(sys.stdin) hexin = [] try: while True: hexin = [] for _ in xrange(7296): hexin.append(bits.next()) sys.stdout.write("".join(list(bitstohex(iter(ggh_hash(hexin)))))) except StopIteration: if len(hexin)>0: sys.stdout.write("".join(list(bitstohex(iter(ggh_hash(hexin)))))) print
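# Editor's note -- hedged sketch, separate from the (Python 2) stdin-driven script above: the same
# hex-to-bits and bits-to-hex nibble packing written as small Python 3 helpers, so the round-trip
# can be checked without stdin or the GGH hash itself.
def hex_to_bits(text):
    bits = []
    for ch in text:
        if ch in ' \r\n':
            continue
        val = int(ch, 16)
        bits.extend([(val >> shift) & 1 for shift in (3, 2, 1, 0)])  # most-significant bit first
    return bits


def bits_to_hex(bits):
    chars = []
    for i in range(0, len(bits) - 3, 4):
        val = bits[i] * 8 + bits[i + 1] * 4 + bits[i + 2] * 2 + bits[i + 3]
        chars.append('0123456789ABCDEF'[val])
    return ''.join(chars)


assert bits_to_hex(hex_to_bits('A3F0')) == 'A3F0'  # round-trip check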
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Donny You (youansheng@gmail.com)
# Repackage some file operations.


from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from utils.tools.logger import Logger as Log


class FileHelper(object):

    @staticmethod
    def make_dirs(dir_path, is_file=False):
        dir_name = FileHelper.dir_name(dir_path) if is_file else dir_path
        if not os.path.exists(dir_name):
            os.makedirs(dir_name)

    @staticmethod
    def dir_name(file_path):
        return os.path.dirname(file_path)

    @staticmethod
    def abs_path(file_path):
        return os.path.abspath(file_path)

    @staticmethod
    def shotname(file_name):
        shotname, extension = os.path.splitext(file_name)
        return shotname

    @staticmethod
    def list_dir(dir_name, prefix=''):
        filename_list = list()
        items = os.listdir(os.path.join(dir_name, prefix))
        for item in items:
            fi_d = os.path.join(dir_name, prefix, item)
            if os.path.isdir(fi_d):
                prefix_temp = '{}/{}'.format(prefix, item).lstrip('/')
                filename_list += FileHelper.list_dir(dir_name, prefix_temp)
            else:
                filename_list.append('{}/{}'.format(prefix, item).lstrip('/'))
        return filename_list


if __name__ == "__main__":
    print(FileHelper.list_dir('/home/donny/Projects'))
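# Editor's note -- hedged usage sketch for FileHelper above (assumes the class is importable; its
# Logger import is unused here). The temp directory stands in for a real project tree, and
# list_dir returns paths relative to the root you pass in.
import os
import tempfile

root = tempfile.mkdtemp()
FileHelper.make_dirs(os.path.join(root, 'sub', 'file.txt'), is_file=True)  # creates <root>/sub
open(os.path.join(root, 'sub', 'file.txt'), 'w').close()

print(FileHelper.shotname('image.png'))  # -> 'image'
print(FileHelper.list_dir(root))         # -> ['sub/file.txt']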
# -*- coding: utf-8 -*-

# Copyright (c) 2016, thumbor-community
# Use of this source code is governed by the MIT license that can be
# found in the LICENSE file.

from pyvows import Vows, expect

from tc_shortener.generators import BaseGenerator
from tc_core.context import Context
from thumbor.config import Config
from thumbor.importer import Importer


@Vows.batch
class Sha1GeneratorVows(Vows.Context):

    class ASha1Generator(Vows.Context):
        def topic(self):
            config = Config()
            importer = Importer(config)
            context = Context(None, config, importer)
            return BaseGenerator(context)

        class WhenNameIsPreserved(Vows.Context):
            def topic(self, generator):
                return generator.get('/unsafe/200x300/ignored_path/image.jpg')

            def should_ignore_path_prefix(self, topic):
                expect(topic).to_match('None/image.jpg')

        class WhenNameIsNotPreserved(Vows.Context):
            def topic(self, generator):
                return generator.get('/unsafe/200x300/ignored_path/image.jpg')

            def should_ignore_path_prefix(self, topic):
                expect(topic).to_match('None')
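# Editor's note -- hedged sketch, not part of the test file above: the pyvows pattern used there,
# shown in isolation. A nested context's topic() receives the parent context's topic, and each vow
# method receives its own context's topic; the batch is run with the pyvows command-line runner
# (e.g. `pyvows -v .`, an assumption about how this project invokes its tests).
from pyvows import Vows, expect


@Vows.batch
class PatternExample(Vows.Context):

    def topic(self):
        return ['alpha', 'beta']  # parent topic

    class FirstItem(Vows.Context):
        def topic(self, items):  # receives the parent topic
            return items[0]

        def should_be_alpha(self, topic):  # receives this context's topic
            expect(topic).to_equal('alpha')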
"""photrix.planning.py""" __author__ = "Eric Dose :: New Mexico Mira Project, Albuquerque" # Python system imports: import os import os.path from collections import OrderedDict from datetime import datetime, timezone, timedelta from math import floor, sqrt, ceil, cos, sin, pi from random import seed, uniform, shuffle # External library imports: import ephem import numpy as np import pandas as pd from astropy.coordinates import SkyCoord, EarthLocation, AltAz, Angle from astropy.stats import circmean from astropy import units as u from astropy.time import Time # Internal (photrix) imports: from .fov import make_fov_dict, FovError, Fov from .user import Astronight, Instrument, MOON_PHASE_NO_FACTOR from .util import RaDec, datetime_utc_from_jd, hhmm_from_datetime_utc, \ ra_as_hours, dec_as_hex, az_alt_at_datetime_utc, \ degrees_as_hex, jd_from_datetime_utc, Timespan, event_utcs_in_timespan from .web import get_aavso_webobs_raw_table DEGREES_PER_RADIAN = 180.0 / pi # USAGE: ******************************************************************* # pl.make_an_roster('20170525', 'c:/Astro/ACP/AN20170525', user_update_tolerance_days=0.1, # exp_time_factor=0.75) # pl.make_an_plan('c:/Astro/ACP/AN20170525/planning.xlsx', exp_time_factor=0.75) # ROSTER Target Statement types: # AZ Her ; standard FOV target # STARE 6 ST Tri ; standard stare FOV target (6 reps) # BURN AA Aur 11:00:00 +34:00:00 ; Burn target (240 sec in V and I) # IMAGE target_name V=12 B=12.5(2) 12:00:00 +23:34:45 ; arbitrary image, exp time from magnitude # IMAGE target_name Clear=240sec(5) 12:00:00 +23:34:45 ; arbitrary image, exp time requested directly FOV_DIRECTORY = "C:/Dev/Photometry/FOV/" STARE_EVENT_TYPES = {"eclipser": "minima", "exoplanet": "minima", "delta scuti": "maxima", 'rr lyrae': 'maxima'} MIN_AVAILABLE_SECONDS_DEFAULT = 900 MIN_AVAILABLE_SECONDS_STARE = 5400 MIN_MOON_DEGREES_DEFAULT = 45 MIN_MOON_DEGREES_STARE = 60 STARE_AN_PRIORITY_DIVIDER = 7.5 # >= this goes into the normal Roster list; < goes to low-pri list. FITS_DIRECTORY = "C:/Astro/Images" # DEFAULT_PLAN_DIRECTORY = 'C:/Astro/Plans' DT_FMT = '%Y-%m-%d %H:%M:%S.%f%z' # kludge around py inconsistency in python's datetime formats PHOTRIX_ROOT_DIRECTORY = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) LOCAL_OBS_CACHE_FULLPATH = os.path.join(PHOTRIX_ROOT_DIRECTORY, "local_obs_cache.csv") EARLIEST_AN_DATE = '20170101' LATEST_AN_DATE = '20241231' # Update this later, I suppose. AN_START_REL_UTC_0000 = 19 # timedelta(UTC of actual AN start - nominal AN @ 0000 hours UTC) # (19 is good for North America) NO_COMMENT_STRING = '[no plan comment given]' # ********** Roster & cache parameters: AAVSO_WEBOBS_ROWS_TO_GET = 100 MIN_ROWS_ONE_STARE = 10 MAX_DAYS_ONE_STARE = 0.5 DEFAULT_UPDATE_TOLERANCE_DAYS = 0.0416667 # 1 hour FORCE_AUTOGUIDE_TOKEN = 'AG+' # ********** ACP Timing: CHILL_DURATION = 240 # seconds. PLAN_START_DURATION = 30 # seconds AUTOFOCUS_DURATION = 260 # seconds, includes slew & filter wheel changes CHAIN_DURATION = 3 # seconds; a guess QUITAT_DURATION = 3 # seconds SHUTDOWN_DURATION = 480 # seconds; a guess # ********** Mount (L-500) Timing: NEW_TARGET_DURATION = 34.3 # seconds; slew + settle + ACP processing (no guider start etc) # ********** Dome (generic azimuth-tracking) Timing: DOME_IS_ACTIVE = True DOME_AZ_SLEW_SPEED = 3 # degrees/second. DOME_OPEN_TIME = 270 # seconds. DOME_CLOSE_TIME = 270 # seconds. 
# ********** Camera & filter wheel (STXL-6303E) Timing: MAX_AGGREGATE_EXPOSURE_NO_GUIDING = 119 # seconds; GUIDE_STAR_ACQUISITION = 17.2 # seconds (if needed) (was 14.2) GUIDER_CHECK_DURATION = 7 # seconds (if needed) (was 4) NEW_FILTER_DURATION = 5 # seconds; filter change and focuser change NEW_EXPOSURE_DURATION_EX_GUIDER_CHECK = 19.3 # seconds; image download, plate solving (excl exposure) # ********** EVD Preferences: BURN_EXPOSURE = 240 # seconds per exposure V_MAG_WARNING = 16.5 # a predicted V magnitude > this will trigger a warning line in Summary file. ABSOLUTE_MAX_EXPOSURE_TIME = 900 # seconds ABSOLUTE_MIN_EXPOSURE_TIME = 2.5 # seconds [20190318, was 3 seconds] MIN_TOTAL_EXP_TIME_PER_FILTER = 9 # seconds, thus 4 [was 3] exposures max per filter for LPVs # ********** MP parameters: # defining MP color sequence as tuple of tuples: (filter, seconds exposure, repeats). # Sloan-filter exposures at V mag = 14, intended to give S/N ~ 200. # Estimated from first Sloan images taken AN20220408, NMS. COLOR_SEQUENCE_AT_V14 = (('SR', 90, 1), ('SG', 200, 1), ('SI', 180, 1), ('SR', 90, 1), ('SI', 180, 1), ('SG', 200, 1), ('SR', 90, 1)) COLOR_FORCE_AUTOGUIDE = True # rather than cluttering Excel file with 'AG+'. def make_df_fov(fov_directory=FOV_DIRECTORY, fov_names_selected=None): """ Returns new, basic fov data frame, by reading FOV files (all or selected) in given directory_path. :param fov_directory: the directory_path from which to read FOV files. :param fov_names_selected: default = all FOV files within given directory_path. :return: basic data frame with columns: fov_name, main_target, fov_priority, obs_style, ra, dec. Index == fov_name. """ fov_dict = make_fov_dict(fov_directory, fov_names_selected) fov_names = list(fov_dict.keys()) df_fov = pd.DataFrame({'fov_name': fov_names}) # 1 column ('fov_name') only. # Add column of Fov objects, matched to column fov_names: df_fov['fov'] = [fov_dict[name] for name in fov_names] # Add other columns (directly from fov) for later convenience: df_fov['main_target'] = [fov_dict[name].main_target for name in fov_names] df_fov['fov_priority'] = [fov_dict[name].priority for name in fov_names] df_fov['obs_style'] = [fov_dict[name].observing_style for name in fov_names] df_fov['ra'] = [fov_dict[name].ra for name in fov_names] df_fov['dec'] = [fov_dict[name].dec for name in fov_names] df_fov['period'] = [fov_dict[name].period for name in fov_names] df_fov['target_type'] = [fov_dict[name].target_type for name in fov_names] df_fov['max_exposure'] = [fov_dict[name].max_exposure for name in fov_names] # Construct column 'radec' from 'ra' and 'dec': df_fov['radec'] = RaDec(0, 0) # dummy value to be replaced (needed to set column object type). for ind in df_fov.index: ra = df_fov.loc[ind, 'ra'] dec = df_fov.loc[ind, 'dec'] df_fov.loc[ind, 'radec'] = RaDec(ra, dec) # Sort by fov name, set index to fov name. df_fov.sort_values(by='fov_name', inplace=True) df_fov.index = df_fov['fov_name'] return df_fov def filter_df_fov_by_obs_styles(df_fov, obs_style_list=None): """ Returns df_fov filtered to contain only specified observing styles. :param df_fov: input fov dataframe. :param obs_style_list: list of observing styles to include (or string for one style). None simply returns the input df_fov. :return: filtered df_fov. 
""" if obs_style_list is None: return df_fov if isinstance(obs_style_list, str): obs_style_list = [obs_style_list] if len(obs_style_list) <= 0: return df_fov obs_style_list_lower = [style.lower() for style in obs_style_list] return df_fov[[style.lower() in obs_style_list_lower for style in df_fov.obs_style]] def filter_df_fov_by_fov_priority(df_fov, min_fov_priority=None, include_std_fovs=True): """ Returns df_fov filtered to contain only fovs with specified minimum fov_priority. :param df_fov: input fov dataframe. :param min_fov_priority: min fov priority to permit. None simply returns the input df_fov. :param include_std_fovs: True to include standard FOVs (even though they have no fov_priority). :return: filtered df_fov. Optionally includes all standard FOVs (default=include standard fovs). """ if min_fov_priority is None: return df_fov fov_priority_ok = df_fov["fov_priority"] >= min_fov_priority if include_std_fovs: is_standard_fov = df_fov["obs_style"].str.lower() == "standard" return df_fov[fov_priority_ok | is_standard_fov] else: return df_fov[fov_priority_ok] def complete_df_fov_an(df_fov, user_update_tolerance_days=DEFAULT_UPDATE_TOLERANCE_DAYS, an_string=None, site_name="NMS_Dome", min_available_seconds=MIN_AVAILABLE_SECONDS_DEFAULT, min_moon_degrees=MIN_MOON_DEGREES_DEFAULT, remove_zero_an_priority=True, remove_unobservables=True): if an_string is None or site_name == "": return df_fov an = Astronight(an_string, site_name) # Construct columns (specific to night and site) for available obs time, this astronight. df_fov = df_fov.assign(moon_deg=0.0) \ .assign(start=an.local_middark_utc) \ .assign(end=an.local_middark_utc) \ .assign(mid=an.local_middark_utc) \ .assign(seconds=0.0) \ .assign(available=' - '.join(2 * [4 * ' '])) \ .assign(an_priority=0.0) \ .assign(an_priority_bars='') # all dummy values to be overwritten later. # Fill in most columns. for ind in df_fov.index: ts_obs = an.ts_observable(df_fov.loc[ind, 'radec'], min_alt=an.site.min_altitude, min_moon_dist=min_moon_degrees) df_fov.loc[ind, 'moon_deg'] = df_fov.loc[ind, 'radec'].degrees_from(an.moon_radec) df_fov.loc[ind, 'start'] = ts_obs.start df_fov.loc[ind, 'end'] = ts_obs.end df_fov.loc[ind, 'mid'] = ts_obs.midpoint df_fov.loc[ind, 'seconds'] = ts_obs.seconds if ts_obs.seconds > 0: df_fov.loc[ind, 'available'] = ' - '.join([hhmm_from_datetime_utc(ts_obs.start), hhmm_from_datetime_utc(ts_obs.end)]) # Remove targets that can't be observed this astronight, *before* getting data from AAVSO: if remove_unobservables: enough_dark_time = df_fov['seconds'] >= min_available_seconds moon_dist_ok = df_fov['moon_deg'] >= min_moon_degrees is_observable = enough_dark_time & moon_dist_ok # print('Querying AAVSO for', str(sum(is_observable)), 'of', str(len(df_fov)), 'targets.') df_fov = df_fov[is_observable] # Update observations cache from AAVSO: loc = LocalObsCache() loc.update_fov_entries(df_fov, user_update_tolerance_days=user_update_tolerance_days) # Compute each target's priority for this astronight: for ind in df_fov.index: this_fov = df_fov.loc[ind, 'fov'] df_fov.loc[ind, 'an_priority'] = loc.calc_an_priority(this_fov, an, user_update_tolerance_days) max_bars = 16 int_an_priority = int(round(df_fov.loc[ind, 'an_priority'])) df_fov.loc[ind, 'an_priority_bars'] = \ (8 * '.' 
+ (max_bars - 8) * '#')[0: min(max_bars, int_an_priority)].ljust(max_bars) if remove_zero_an_priority: df_fov = df_fov[df_fov['an_priority'] > 0.0] return df_fov.sort_values(by=['mid', 'an_priority'], ascending=[True, False]) class LocalObsCache: """ Holds a cache dataframe of most recent relevant observations for ~all FOVs. Can hold only one dataframe row per fov (however many filters constitute a previous obs). Will query AAVSO webobs site to refresh a database row if fov's main target looks too old. Cache dataframe columns are: fov_name [string] main_target [string] obs_style [string] cache_datetime: datetime this row was updated [datetime.datetime UTC] obs_datetime: datetime of most recent known observation [datetime.datetime UTC] obs_mag: magnitude of most recent observation [float] obs_mag_filter: filter in which obs_mag was measured [string] Typical usage: pl.make_an_plan('c:/Astro/ACP/AN20170525/planning.xlsx', exp_time_factor=0.75) """ def __init__(self): # Read in local cache if it exists. if os.path.isfile(LOCAL_OBS_CACHE_FULLPATH): self.df_cache = self._read_cache_from_csv() need_to_create_empty_cache = self.df_cache is None else: need_to_create_empty_cache = True if need_to_create_empty_cache: # Create *empty* dataframe with dtypes (incl. utc datetimes), write to cache file: self.df_cache = pd.DataFrame.from_dict(OrderedDict([ ('fov_name', ['dummy']), ('main_target', ['dummy']), ('obs_style', ['dummy']), ('cache_datetime', [datetime.now(timezone.utc)]), ('obs_datetime', [datetime.now(timezone.utc)]), ('obs_mag', [0.0]), ('obs_mag_filter', ['dummy'])]))[:0] self.df_cache.index.name = 'row_index' csv_fullpath = self._write_cache_to_csv() # empty cache to csv print('LocalObsCache: wrote new, empty cache file to ' + csv_fullpath) print('LocalObsCache opened; ' + str(len(self.df_cache)) + ' fovs.') def update_fov_entries(self, df_fov, user_update_tolerance_days=DEFAULT_UPDATE_TOLERANCE_DAYS, max_fovs_since_write=6): """ For each fov available this night (in df_fov_list), update the cache. :param df_fov: df_fov (typically of fovs available this night) [pandas DataFrame]. :param user_update_tolerance_days: pass-through parm [float]. :param max_fovs_since_write: controls frequence of writes to cache. :return: number of fovs updated (fn effect is to update this class's cache dataframe. """ fovs_since_write = 0 for fov in df_fov['fov']: need_to_write_csv = (fovs_since_write >= max_fovs_since_write - 1) self.update_one_fov_entry(fov, user_update_tolerance_days, write_csv=need_to_write_csv) if need_to_write_csv: fovs_since_write = 0 else: fovs_since_write += 1 self._write_cache_to_csv() # ensure cache written at end. def update_one_fov_entry(self, fov, user_update_tolerance_days=DEFAULT_UPDATE_TOLERANCE_DAYS, write_csv=False): """ This class's engine. Updates cache's entry for one fov, if entry is too aged. :param fov: fov to update in cache now [Fov object] :param user_update_tolerance_days: pass-through parm [float] :param write_csv: :return: if cache was updated, datetime (UTC) of new obs; else None. """ # TODO: If query to AAVSO yields no latest obs, put some placeholder with cache_dt at least. if fov is None: raise FovError main_target = fov.main_target # self._curate_df_cache(fov_name, main_target) # Determine whether update is needed, return if not. 
cache_row_pre_exists = fov.fov_name.lower() in list(self.df_cache['fov_name'].str.lower()) if cache_row_pre_exists: now = datetime.now(timezone.utc) current_cache_datetime = self.df_cache.loc[fov.fov_name, 'cache_datetime'] update_age = (now - current_cache_datetime).total_seconds() / (24 * 3600) if user_update_tolerance_days is None: update_tolerance_days = DEFAULT_UPDATE_TOLERANCE_DAYS else: update_tolerance_days = user_update_tolerance_days entry_fresh_enough = update_age <= update_tolerance_days if entry_fresh_enough: return self.df_cache.loc[fov.fov_name, 'obs_datetime'] # skip updating # Update fov's cache entry, from AAVSO webobs. obs_style = fov.observing_style obs_style_lower = obs_style.lower() target_type_lower = fov.target_type.lower() if target_type_lower == 'standard': return None if obs_style_lower == 'stare': num_obs = 200 else: num_obs = 100 print('AAVSO webobs query ' + fov.target_type + ' \'' + main_target + '\'...', end='', flush=True) recent_observations = AavsoWebobs(star_id=main_target, num_obs=num_obs) # from AAVSO print('ok.', end='', flush=True) latest_obs_df = None # default if no matches. if (obs_style_lower, target_type_lower) == ('lpv', 'mira'): latest_obs_df = self._latest_single_obs(fov, obs_style, recent_observations, allow_filters=['V']) elif (obs_style_lower, target_type_lower) == ('lpv', 'lpv'): latest_obs_df = self._latest_single_obs(fov, obs_style, recent_observations, allow_filters=['V', 'R']) elif obs_style_lower == 'monitor' and target_type_lower != 'astrometric': latest_obs_df = self._latest_single_obs(fov, obs_style, recent_observations, allow_filters=['V', 'R']) elif obs_style_lower == 'stare': latest_obs_df = self._latest_stare_obs(fov, recent_observations, allow_filters=['V', 'R']) else: print('\n*** WARNING: for fov \'' + fov.fov_name + '(obs_style, target_type) = (' + obs_style + ', ' + fov.target_type + ') not understood.', end='', flush=True) if cache_row_pre_exists: self.df_cache = latest_obs_df.combine_first(self.df_cache) # overwrites. else: # This else-block is kludge for pandas' mis-handling of append to empty DataFrame. if len(self.df_cache) >= 1: self.df_cache = self.df_cache.append(latest_obs_df, sort=True) else: self.df_cache = latest_obs_df.copy() if write_csv: self._write_cache_to_csv() print('..csv written.', end='', flush=True) print('') if latest_obs_df is None: return None return latest_obs_df.iloc[0].loc['obs_datetime'] # obs datetime, to signal OK. def _latest_single_obs(self, fov, obs_style, recent_observations, allow_filters): """ Takes a AavsoWebObs object and returns a pandas dataframe ready for inclusion in LocalCacheObs dataframe df_cache. Single-observation case (not stare). :param fov: fov to investigate for recent single observations [Fov object] :param obs_style: [string] ('Monitor' or 'LPV') :param recent_observations: recent observations for fov_name [AavsoWebObs object]. :param allow_filters: list of filters [string] to include in finding latest observation. :return: 1-row dataframe of relevant data about latest stare observation for this fov_name; return (with some None values) if no qualifying observation is found. 
""" allow_filters_lower = [f.lower() for f in allow_filters] table_filters_lower = recent_observations.table['filter'].str.lower() rows_to_keep = [f.lower() in allow_filters_lower for f in table_filters_lower] if sum(rows_to_keep) <= 0: latest_obs = None else: latest_obs = recent_observations.table[rows_to_keep].nlargest(1, 'jd').iloc[0] if latest_obs is None: # If no qualified observation found within webobs query, # construct placeholder row in df_cache, to prevent repeating query needlessly. latest_obs_df = pd.DataFrame.from_dict(OrderedDict([ ('fov_name', fov.fov_name), ('main_target', fov.main_target), ('obs_style', fov.observing_style), ('cache_datetime', [datetime.now(timezone.utc)]), ('obs_datetime', [None]), ('obs_mag', [None]), ('obs_mag_filter', [None])])) for column_name in ['cache_datetime']: latest_obs_df[column_name] = [x.to_pydatetime() for x in latest_obs_df[column_name]] else: latest_obs_df = pd.DataFrame.from_dict(OrderedDict([ ('fov_name', fov.fov_name), ('main_target', fov.main_target), ('obs_style', fov.observing_style), ('cache_datetime', [datetime.now(timezone.utc)]), ('obs_datetime', [datetime_utc_from_jd(latest_obs.jd)]), ('obs_mag', [latest_obs.mag]), ('obs_mag_filter', [latest_obs.loc['filter']])])) for column_name in ['cache_datetime', 'obs_datetime']: latest_obs_df[column_name] = [x.to_pydatetime() for x in latest_obs_df[column_name]] latest_obs_df.index = latest_obs_df['fov_name'].copy() latest_obs_df.index.name = 'row_index' return latest_obs_df def _latest_stare_obs(self, fov, recent_observations, allow_filters): """ Takes a AavsoWebObs object and returns a 1-row pandas dataframe ready for inclusion in LocalCacheObs dataframe df_cache. Stare case (multiple observations in one night), typically for eclipsers. :param fov: fov to investigate for recent stare observations [Fov object] :param recent_observations: recent observations for fov_name [AavsoWebObs object]. :param allow_filters: list of filters [string] to include in finding latest observation. :return: dataframe of relevant data about latest stare observation for this fov_name; return (with some None values) if no qualifying stare observation is found. """ if len(recent_observations.table) <= MIN_ROWS_ONE_STARE: return None # Find latest qualifying stare in each filter, return latest observation of latest stare. 
latest_stare_obs_df = None for this_filter in allow_filters: stare_already_found_this_filter = False this_filter_lower = this_filter.lower() table_filters_lower = recent_observations.table['filter'].str.lower() rows_to_keep = [f.lower() == this_filter_lower for f in table_filters_lower] table_this_filter = recent_observations.table[rows_to_keep].sort_values(by='jd', ascending=False) num_tests = len(table_this_filter) - MIN_ROWS_ONE_STARE + 1 if num_tests >= 1: for first_test_irow in range(0, num_tests): if not stare_already_found_this_filter: test_latest_jd = table_this_filter['jd'] \ .iloc[first_test_irow] test_earliest_jd = table_this_filter['jd'] \ .iloc[first_test_irow + MIN_ROWS_ONE_STARE - 1] if test_latest_jd - test_earliest_jd <= MAX_DAYS_ONE_STARE: stare_already_found_this_filter = True if latest_stare_obs_df is None: need_to_replace = True else: candidate_datetime = datetime_utc_from_jd(test_latest_jd) existing_datetime = latest_stare_obs_df.iloc[0].loc['obs_datetime'] need_to_replace = (candidate_datetime > existing_datetime) if need_to_replace: latest_stare_obs = table_this_filter.iloc[first_test_irow] latest_stare_obs_df = pd.DataFrame.from_dict(OrderedDict([ ('fov_name', fov.fov_name), ('main_target', fov.main_target), ('obs_style', fov.observing_style), ('cache_datetime', [datetime.now(timezone.utc)]), ('obs_datetime', [datetime_utc_from_jd(latest_stare_obs.jd)]), ('obs_mag', [latest_stare_obs.mag]), ('obs_mag_filter', [latest_stare_obs.loc['filter']])])) for column_name in ['cache_datetime', 'obs_datetime']: latest_stare_obs_df[column_name] = \ [x.to_pydatetime() for x in latest_stare_obs_df[column_name]] latest_stare_obs_df.index = latest_stare_obs_df['fov_name'].copy() latest_stare_obs_df.index.name = 'row_index' if latest_stare_obs_df is None: # If no qualified stare observation found within webobs query, # construct placeholder row in df_cache, to prevent repeating query needlessly. latest_stare_obs_df = pd.DataFrame.from_dict(OrderedDict([ ('fov_name', fov.fov_name), ('main_target', fov.main_target), ('obs_style', fov.observing_style), ('cache_datetime', [datetime.now(timezone.utc)]), ('obs_datetime', [None]), ('obs_mag', [None]), ('obs_mag_filter', [None])])) for column_name in ['cache_datetime']: latest_stare_obs_df[column_name] = [x.to_pydatetime() for x in latest_stare_obs_df[column_name]] latest_stare_obs_df.index = latest_stare_obs_df['fov_name'].copy() latest_stare_obs_df.index.name = 'row_index' return latest_stare_obs_df @staticmethod def _read_cache_from_csv(): cache = pd.read_csv(LOCAL_OBS_CACHE_FULLPATH, index_col=0) if len(cache) <= 0: return None for column_name in ['cache_datetime', 'obs_datetime']: if column_name not in cache.columns: return None # Parse cache_datetime column. cache['cache_datetime'] = [datetime.strptime(s, DT_FMT) for s in cache['cache_datetime']] # Parse obs_datetime column. 
for row_index in cache.index: if str(cache.loc[row_index, 'obs_datetime']).lower() != 'none': cache.loc[row_index, 'obs_datetime'] = \ datetime.strptime(cache.loc[row_index, 'obs_datetime'], DT_FMT) cache.loc[row_index, 'obs_mag'] = float(cache.loc[row_index, 'obs_mag']) else: cache.loc[row_index, 'obs_datetime'] = None cache.loc[row_index, 'obs_mag'] = None cache.loc[row_index, 'obs_mag_filter'] = None return cache def _write_cache_to_csv(self): # Very specifically writes datetimes in format: '2017-02-07 03:34:45.786374+0000' dt_format = '{:' + DT_FMT + '}' lines = [','.join(['row_index', 'fov_name', 'main_target', 'obs_style', 'cache_datetime', 'obs_datetime', 'obs_mag', 'obs_mag_filter']) + '\n'] for row_index in self.df_cache.index: row = self.df_cache.loc[row_index] if row['obs_datetime'] is None or isinstance(row['obs_datetime'], type(pd.NaT)): line = ','.join([row_index, row['fov_name'], row['main_target'], row['obs_style'], dt_format.format(row['cache_datetime']), 'None', 'None', 'None']) + '\n' else: line = ','.join([row_index, row['fov_name'], row['main_target'], row['obs_style'], dt_format.format(row['cache_datetime']), dt_format.format(row['obs_datetime']), '{:.4f}'.format(row['obs_mag']), row['obs_mag_filter']]) + '\n' lines.append(line) with open(LOCAL_OBS_CACHE_FULLPATH, 'w') as f: f.writelines(lines) # print("Cache written: " + str(len(self.df_cache)) + ' fovs.') return LOCAL_OBS_CACHE_FULLPATH def calc_an_priority(self, fov, an, user_update_tolerance_days=DEFAULT_UPDATE_TOLERANCE_DAYS): """ Calculates astronight priority for one fov. :param fov: :param user_update_tolerance_days: pass-through parm [float]. :param an: Astronight object for the night in question. :return: an_priority, from fov_priority and age of most recent obs [float]. """ if fov is None: print('LOC.calc_an_priority: fov \'' + fov + '\' not found in fov_dict.') return None if (fov.priority is None) or (fov.target_type.lower() == 'standard'): return 0 if fov.priority <= 0: return 0 # self.update_one_fov_entry(fov, user_update_tolerance_days, write_csv=True) if fov.fov_name not in self.df_cache.index: return 2 * fov.priority # the maximum, since no latest obs was accessible. latest_obs = self.df_cache.loc[fov.fov_name] if latest_obs.obs_datetime is None: return 2 * fov.priority # the maximum, since no latest obs was accessible. jd_latest_obs = jd_from_datetime_utc(latest_obs.obs_datetime) age_days = an.local_middark_jd - jd_latest_obs return fov.calc_priority_score(age_days) def _curate_df_cache(self, fov_name, main_target): """ Cull damaged records from self.df_cache. *** Deactivated 2017-02-07 pending manual debugging. *** :param fov_name: :param main_target: :return: [nothing] """ # Curation: If main_target is corrupt, delete all cache lines for that fov. if main_target is None: rows_to_delete = self.df_cache['fov_name'].str.lower() == fov_name.lower() self.df_cache = self.df_cache[rows_to_delete == False] return # Curation: If fov and target names don't match, delete all such fov and target lines. 
rows_with_wrong_target = \ (self.df_cache['fov_name'].str.lower() == fov_name.lower()) & \ (self.df_cache['main_target'].str.lower() != main_target.lower()) rows_to_keep = [not row for row in rows_with_wrong_target] self.df_cache = self.df_cache[rows_to_keep] rows_with_wrong_fov = \ (self.df_cache['main_target'].str.lower() == main_target.lower()) & \ (self.df_cache['fov_name'].str.lower() != fov_name.lower()) rows_to_keep = [not row for row in rows_with_wrong_fov] self.df_cache = self.df_cache[rows_to_keep] def __str__(self): return 'LocalObsCache object with ' + str(len(self.df_cache)) + \ ' observations.' def __repr__(self): return 'planning.LocalObsCache()' class AavsoWebobs: """ Simple class: one object:one star. Holds dataframe for one star from AAVSO's webobs database. Also updates local cache file (to unneeded future calls to webobs). For Observation Styles: LPV, Monitor, and Stare; no need for Standard or Burn. Usage: table = AavsoWebobs("AU Aur") [for one obs/night], or table = AavsoWebobs("ST Tri", stare=True) [for at least 10 obs/night in filter]. """ def __init__(self, star_id=None, num_obs=AAVSO_WEBOBS_ROWS_TO_GET, dataframe=None): if dataframe is not None: self.table = dataframe # typically for testing only. self.star_id = self.table['target_name'].iloc[0] else: self.table = get_aavso_webobs_raw_table(star_id, num_obs=num_obs) # normal case self.star_id = star_id # def get_local_aavso_reports(report_dir=None, earliest_an=None): # pass # report_dict = {} # for root, dirs, files in os.walk('C:/Astro/Images/Borea Photrix/'): # if root.endswith("Photometry"): # report = [file for file in files if file.startswith("AAVSO")] # if len(report) >= 1: # report_fullpath = os.path.join(root, report[0]) # with open(report_fullpath) as report_file: # lines = report_file.readlines() # # # Report = namedtuple('Report', ['JD', 'lines']) # # # # # def get_local_obs_age_dict(fov_dict=None, report_dir=None, target_an=None, limit_days=366): # # TODO: finish writing get_local_obs_age_dict() # """ # report_dir: directory_path in which all relevant AAVSO reports reside, as # "C:/Astro/2016/Photometry". # target_an: target astronight from which to count days, as "20151216". # limit_days: days into the past to look up old AAVSO reports. # Returns dict of (fov_name, days_since_last_local_obs). # """ # pass # if report_dir is not None and limit_days >= 1: # fov_age_dict = {name: None for name in fov_dict.keys()} # empty dict to start # # TODO: get report_list <- [report_text] for every eligible AAVSO report, # latest to earliest. # # for report_text in report_list: # # TODO: get jd_dict # i.e., {fov_name: latest jd_obs} for each main target in AAVSO report. # # for an_dir in dir_list: # an_dict = defaultdict(lambda: None) # # read AAVSO report, fill an_dict with target: latest JD # for fov_name, fov in fov_dict.items(): # an_age = an_dict[fov.main_target] # if an_age is not None: # dict_age = fov_age_dict[fov_name] # if dict_age is not None: # if an_age < dict_age: # fov_age_dict[fov_name] = an_age # else: # fov_age_dict[fov_name] = an_age # return fov_age_dict # # --------------------------------------------- def make_an_roster(an_date_string, output_directory, site_name='NMS_Dome', instrument_name='Borea', user_update_tolerance_days=DEFAULT_UPDATE_TOLERANCE_DAYS, exp_time_factor=1, min_an_priority=6): # TODO: recode download loop to only download those FOVs visible tonight & for which priority might # be high enough (read from csv: some might already be known to have been recently observed). 
""" Generates new .csv file containing info on each fov available this astronight. Typical usage: pl.make_an_roster("20170127", "C:/Astro/ACP/AN20170127/", user_update_tolerance_days=0.1, exp_time_factor=0.8) :param an_date_string: as '20170127'. Date of the evening to plan for [string] :param output_directory: directory_path in which to write Roster csv file [string] :param site_name: [string] :param instrument_name: [string] :param user_update_tolerance_days: esp for user to force update [float] :param exp_time_factor: multiply *raw* exp times by this; typically 0.6-0.9 [float] :param min_an_priority: hide Monitor and LPV targets with an_priority < this [float] :return: tuple of number of fovs, each obs style: (n_std, n_monitor_lpv, n_stare). [ints] """ an = Astronight(an_date_string=an_date_string, site_name=site_name) df_fov = make_df_fov(fov_directory=FOV_DIRECTORY, fov_names_selected=None) print(str(len(df_fov)), 'FOVs read.') instrument = Instrument(instrument_name) an_year = int(an_date_string[0:4]) an_month = int(an_date_string[4:6]) an_day = int(an_date_string[6:8]) day_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday'] \ [datetime(an_year, an_month, an_day).weekday()] lines_header = ['ROSTER file for ' + an_date_string + ' ' + day_of_week, ' as generated by photrix ' + '{:%Y-%m-%d %H:%M UTC}'.format(datetime.now(timezone.utc)), ' using exposure time factor = ' + '{:5.3f}'.format(exp_time_factor), an.acp_header_string().replace(',', ' '), '; Site=' + site_name + ' Instrument=' + instrument_name + ' min.alt = ' + '{:.1f}'.format(an.site.min_altitude) + u'\N{DEGREE SIGN}'] # Handle obs_style = 'Standard': lines_std = ['\n\n\nSTANDARD roster for ' + an_date_string + ': ' + 50 * '-', ',fov,fov, avail_utc,transit,minutes, stars'] df_fov_std = filter_df_fov_by_obs_styles(df_fov, obs_style_list=['Standard']) df_fov_std = complete_df_fov_an(df_fov_std, user_update_tolerance_days, an_string=an_date_string, site_name=site_name, min_available_seconds=MIN_AVAILABLE_SECONDS_DEFAULT, min_moon_degrees=MIN_MOON_DEGREES_DEFAULT, remove_zero_an_priority=False, remove_unobservables=True) for fov_index in df_fov_std.index: fov_name = df_fov_std.loc[fov_index, 'fov_name'] available = df_fov_std.loc[fov_index, 'available'] this_fov = Fov(fov_name) transit_hhmm = hhmm_from_datetime_utc(an.transit(RaDec(this_fov.ra, this_fov.dec))) exp_data = make_fov_exposure_data(fov_name, an, fov_dict=None, instrument=instrument, exp_time_factor=exp_time_factor, force_autoguide=False) # default=autoguide iff exp-times warrant. if exp_data is None: return # fail _, _, _, target_overhead, repeat_duration = exp_data minutes = (target_overhead + repeat_duration) / 60.0 n_stars = len(this_fov.aavso_stars) this_fov_line = ',' + fov_name + ',' + fov_name + ', ' + available + ',' + \ "=\"" + transit_hhmm + "\"" + ',' + str(int(minutes)) + \ ',' + '{:3d}'.format(n_stars) # formatting to placate Excel csv weirdness. 
lines_std.append(this_fov_line) # Handle obs_style = 'Stare': lines_stare_high_priority = \ ['\n\n\nSTARE roster for ' + an_date_string + ': ' + 50 * '-', ',fov,fov, avail_utc,transit,min/rpt, an_priority,,period, events'] lines_stare_low_priority = \ ['\n\n\nSTARE roster (alternate; low-priority) for ' + an_date_string + ': ' + 50 * '-', ',fov,fov, avail_utc,transit,min/rpt, an_priority,,period, events'] df_fov_stare = filter_df_fov_by_obs_styles(df_fov, obs_style_list=['Stare']) # Process each fov equally through most of the code, # then only in the last if block, write to one list or the other. df_fov_stare = complete_df_fov_an(df_fov_stare, user_update_tolerance_days, an_string=an_date_string, site_name=site_name, min_available_seconds=MIN_AVAILABLE_SECONDS_STARE, min_moon_degrees=MIN_MOON_DEGREES_STARE, remove_zero_an_priority=False, remove_unobservables=True) for fov_index in df_fov_stare.index: row = df_fov_stare.loc[fov_index] fov_name = row.loc['fov_name'] available = row.loc['available'] this_fov = Fov(fov_name) transit_hhmm = hhmm_from_datetime_utc(an.transit(RaDec(this_fov.ra, this_fov.dec))) exp_data = make_fov_exposure_data(fov_name, an, fov_dict=None, instrument=instrument, exp_time_factor=exp_time_factor, force_autoguide=False) # default=autoguide iff exp-times warrant. if exp_data is None: return # fail _, _, _, target_overhead, repeat_duration = exp_data minutes = (target_overhead + repeat_duration) / 60.0 an_priority = row.loc['an_priority'] an_priority_bars = row.loc['an_priority_bars'] period = row.loc['period'] row_ts = Timespan(row.loc['start'], row.loc['end']) # Timespan object for this row. # For now, we will consider that each Stare FOV wants either minima or maxima but not both. event_type_string = STARE_EVENT_TYPES.get(this_fov.target_type.lower(), None) if event_type_string is None: print(this_fov.fov_name + ': probable bad target_type in FOV.') do_minima = event_type_string.lower().startswith("min") do_maxima = event_type_string.lower().startswith("max") # Start with an *empty* dataframe of events, with correct dtypes: df_events = pd.DataFrame.from_dict(OrderedDict( [('event_type', 'dummy_type'), ('utc', [datetime.now(timezone.utc)])]))[:0] if do_minima: list_primary_mins = event_utcs_in_timespan(this_fov.JD_faint, this_fov.period, row_ts) if list_primary_mins is None: primaries_exist = False else: primaries_exist = len(list_primary_mins) >= 1 if primaries_exist: df_primary_mins = pd.DataFrame.from_dict(dict([('utc', list_primary_mins,)])) df_primary_mins['event_type'] = "1'" df_events = df_events.append(df_primary_mins, sort=True) list_secondary_mins = event_utcs_in_timespan(this_fov.JD_second, this_fov.period, row_ts) if list_secondary_mins is None: secondaries_exist = False else: secondaries_exist = len(list_secondary_mins) >= 1 if secondaries_exist: df_secondary_mins = pd.DataFrame.from_dict(dict([('utc', list_secondary_mins,)])) df_secondary_mins['event_type'] = "2'" df_events = df_events.append(df_secondary_mins, sort=True) if do_maxima: list_maxima = event_utcs_in_timespan(this_fov.JD_bright, this_fov.period, row_ts) if list_maxima is None: maxima_exist = False else: maxima_exist = len(list_maxima) >= 1 if maxima_exist: df_maxima = pd.DataFrame.from_dict(dict([('utc', list_maxima,)])) df_maxima['event_type'] = "max" df_events = df_events.append(df_maxima, sort=True) if len(df_events) >= 1: motive = this_fov.motive df_events.sort_values(by='utc', inplace=True) events_string = ' ' for row in df_events.itertuples(): events_string += str(row.event_type) + 
"=" + hhmm_from_datetime_utc(row.utc) + ' ' this_fov_line = ',' + fov_name + ',' + fov_name + ',' + available + ',' + \ "=\"" + transit_hhmm + "\"" + ',' + str(int(minutes)) + ',' + \ str(int(round(an_priority))) + ' ,' + an_priority_bars + ',' + \ '{:7.3f}'.format(period) + ' ,' + events_string + ',' + \ "\" " + motive + "\"" # formatting to placate Excel csv weirdness. if an_priority >= STARE_AN_PRIORITY_DIVIDER: lines_stare_high_priority.append(this_fov_line) else: lines_stare_low_priority.append(this_fov_line) # Handle obs_style = 'Monitor' or 'LPV': lines_mon_lpv = ['\n\n\nMONITOR / LPV roster for ' + an_date_string + ': ' + 50 * '-', ',fov,fov,avail_utc,transit,minutes, an_priority'] df_fov_mon_lpv = filter_df_fov_by_obs_styles(df_fov, obs_style_list=['Monitor', 'LPV']) df_fov_mon_lpv = filter_df_fov_by_fov_priority(df_fov_mon_lpv, min_fov_priority=0.5, include_std_fovs=False) df_fov_mon_lpv = complete_df_fov_an(df_fov_mon_lpv, user_update_tolerance_days, an_string=an_date_string, site_name=site_name, min_available_seconds=MIN_AVAILABLE_SECONDS_DEFAULT, min_moon_degrees=MIN_MOON_DEGREES_DEFAULT, remove_zero_an_priority=True, remove_unobservables=True) for fov_index in df_fov_mon_lpv.index: an_priority = df_fov_mon_lpv.loc[fov_index, 'an_priority'] if an_priority >= min_an_priority: fov_name = df_fov_mon_lpv.loc[fov_index, 'fov_name'] available = df_fov_mon_lpv.loc[fov_index, 'available'] this_fov = Fov(fov_name) transit_hhmm = hhmm_from_datetime_utc(an.transit(RaDec(this_fov.ra, this_fov.dec))) exp_data = make_fov_exposure_data(fov_name, an, fov_dict=None, instrument=instrument, exp_time_factor=exp_time_factor, force_autoguide=False) # so autoguide iff exp-times warrant. if exp_data is None: return # fail _, _, _, target_overhead, repeat_duration = exp_data minutes = (target_overhead + repeat_duration) / 60.0 an_priority_bars = df_fov_mon_lpv.loc[fov_index, 'an_priority_bars'] motive = Fov(fov_name).motive this_fov_line = ',' + fov_name + ',' + fov_name + ', ' + available + ',' + \ "=\"" + transit_hhmm + "\"" + ',' + str(int(minutes)) + ',' + \ str(int(round(an_priority))) + ' ,' + an_priority_bars + ',' + \ "\" " + motive + "\"" # formatting to placate Excel csv weirdness. lines_mon_lpv.append(this_fov_line) # Assemble all output lines: lines_all = lines_header + \ lines_std + \ lines_stare_high_priority + lines_stare_low_priority + \ lines_mon_lpv # Write all lines to file: os.makedirs(output_directory, exist_ok=True) output_fullpath = os.path.join(output_directory, 'Roster_' + an.an_date_string + '.csv') csv_written = False while not csv_written: try: with open(output_fullpath, 'w') as this_file: this_file.write('\n'.join(lines_all)) csv_written = True except PermissionError: input('***** CLOSE file \'' + output_fullpath + '\' and hit Enter.') print('Done.') class Plan: """ Holds all data for one ACP plan. :param plan_id: name of this plan, minimalist, as 'C' [string] """ def __init__(self, plan_id=None, plan_comment=None): self.plan_id = plan_id self.plan_comment = plan_comment.strip() self.directives = [] # as parsed from user input in parse_excel(). # Values populated in make_events(): self.utc_quitat = None # forced stop time (at end of last event) self.afinterval = None # in minutes; default=None if no afinterval requested for this plan. self.sets_requested = 1 # default self.chain_destination = None # next plan filename, None if no chaining requested for this plan. self.events = [] # holds only ONE element per intended event, no matter how many SETS in a plan. 
# Values populated in make_timeline(): self.utc_start = None # actual start time (computed later) self.utc_end = None # actual end time (computed later) self.sets_completed = 0 # actual number of set cycles completed (integer) self.afinterval_autofocus_count = 0 # count of autofocuses caused by AFINTERVAL, this set. # Lists of text lines to go before and after main body of lines (from make_events()): self.summary_pre_lines = [] self.summary_post_lines = [] self.acp_pre_lines = [] self.end_warning_lines = [] self.acp_post_lines = [] def quitat_reached_at(self, utc): if self.utc_quitat is None: return False return utc >= self.utc_quitat def __str__(self): return 'Plan object: ' + self.plan_id class Directive: """ Holds all initial data for one user-given directive (e.g., one cell in Excel spreadsheet). :param type: type of directive, from approved list, e.g., 'CHILL' or 'fov' [string, case-insens.] :param spec: dictionary of specification data to hold, depends on directive type [directory_path]. """ def __init__(self, type, spec_dict): self.type = type.lower() self.spec = spec_dict self.comment = '' def __str__(self): return 'Directive object: ' + self.type class Event: """ Holds all data for one event to be executed (per set). Each Event object will result in at least one line in summary doc and in ACP plan file. """ def __init__(self, event_type, summary_text, acp_lines, duration_total=0, duration_dict=None, target_name=None, ra=None, dec=None): self.type = event_type.lower() self.summary_text = summary_text # starting text for summary document self.summary_lines = [] # final lines for summary document self.acp_lines = acp_lines # list (always) of lines for ACP plan file [list of strings] self.duration_total = duration_total # total for event; 0 for waituntil, quitat, etc. # .duration_dict exists only for exposure-event types: burn, stare, fov, and image, # as: {'target_overhead': in sec, 'repeat_count': n, 'counts': [n], 'exp_times': [in sec]} : self.duration_dict = duration_dict # dict describing durations of indiv exposures, incl overheads. self.utc_end = None # for waituntil. self.target_name = target_name # for any exposure event type (= fov name for most) self.ra = ra # string, for exposure-event types self.dec = dec # " # For this event's summary line. Values later populated by make_timeline(): self.status = None # any of: 'ok', 'chain', 'quitat', 'wait' self.utc_summary_display = None # stored only for first SET. self.min_altitude = None # in degrees, for ALL sets in this plan. def calc_actual_duration(self, utc_start, utc_quitat, afinterval, utc_most_recent_autofocus): if utc_quitat is None: return self.duration_total, 0, utc_most_recent_autofocus if utc_start >= utc_quitat: return 0, 0, utc_most_recent_autofocus if self.duration_dict is None: return None total_exp_time = self.duration_dict['repeat_count'] * sum([c * e for (c, e) in zip(self.duration_dict['counts'], self.duration_dict['exp_times'])]) exposure_count = self.duration_dict['repeat_count'] * sum(self.duration_dict['counts']) overhead_per_exposure = (self.duration_total - total_exp_time - self.duration_dict['target_overhead']) / exposure_count utc_running = utc_start + timedelta(seconds=self.duration_dict['target_overhead']) event_autofocus_count = 0 # accumulator for this event. 
for i_repeat in range(self.duration_dict['repeat_count']): for c, e in zip(self.duration_dict['counts'], self.duration_dict['exp_times']): for i_exp in range(c): # Update clock for any AFINTERVAL-triggered autofocus: if afinterval is not None: minutes_since_last_autofocus = \ (utc_running - utc_most_recent_autofocus).total_seconds() / 60.0 if minutes_since_last_autofocus > afinterval: utc_running += timedelta(seconds=AUTOFOCUS_DURATION) utc_most_recent_autofocus = utc_running event_autofocus_count += 1 # Update clock for exposure itself: utc_running += timedelta(seconds=overhead_per_exposure + e) # Terminate event if QUITAT time has passed: if utc_running >= utc_quitat: return (utc_running - utc_start).total_seconds(), \ event_autofocus_count, utc_most_recent_autofocus return (utc_running - utc_start).total_seconds(), event_autofocus_count, utc_most_recent_autofocus def calc_lower_altitude(self, an, utc1, utc2): longitude, latitude = an.site.longitude, an.site.latitude longitude_hex, latitude_hex = degrees_as_hex(longitude), degrees_as_hex(latitude) target_radec = RaDec(self.ra, self.dec) _, alt_deg_utc1 = az_alt_at_datetime_utc(longitude_hex, latitude_hex, target_radec, utc1) _, alt_deg_utc2 = az_alt_at_datetime_utc(longitude_hex, latitude_hex, target_radec, utc2) return min(alt_deg_utc1, alt_deg_utc2) def __str__(self): return 'Event object: ' + self.summary_text def make_an_plan(plan_excel_path='c:/24hrs/Planning.xlsx', site_name='NMS_Dome', instrument_name='Borea', fov_dict=None, earliest_an_start_hhmm=None, exp_time_factor=1): """ Main user fn to take sketch Excel file and generate Summary and ACP Plan files. :param plan_excel_path: full path to Excel file holding all info for one night's observations. :param site_name: a Site object for location of observations. :param instrument_name: an Instrument object for scope to be used. :param fov_dict: fov_dict if available, default=None to generate new fov_dict (normal case). :param earliest_an_start_hhmm: 'hhmm' time to start plan, default=None for 'earliest possible' (normal case). :param exp_time_factor: multiply *raw* exp times by this; typically 0.6-0.9 [float] :return: Writes out Summary file with dateline, and one or more ACP plan files. Typical usage: pl.make_an_plan('c:/Astro/ACP/AN20170525/planning.xlsx', exp_time_factor=0.7) """ # TODO: LocalObsCache updates only for fovs actually used, not including Burns. ???meant for roster??? plan_list, an = parse_excel(plan_excel_path, site_name) reorder_directives(plan_list) if fov_dict is None: fov_dict = make_fov_dict() instrument = Instrument(instrument_name) make_events(plan_list, instrument, fov_dict, an=an, exp_time_factor=exp_time_factor) output_directory = os.path.split(plan_excel_path)[0] make_timeline(plan_list, an=an, earliest_hhmm=earliest_an_start_hhmm) make_acp_plan_files(plan_list, an, output_directory, exp_time_factor) make_summary_file(plan_list, fov_dict, an, output_directory, exp_time_factor) def parse_excel(excel_path, site_name='NMS_Dome'): """ Parses sketch Excel file, returns a list of Plan objects containing all directives and the relevant Astronight object. :param excel_path: full path to Excel file holding all info for one night's observations [str]. :param site_name: a Site object for location of observations [string] :return: list of Plan objects, astronight object (2-tuple) ----- Target types & their syntax: FOV_name :: for LPV, standards, and other once-per-night targets having FOV files, e.g., "FF Lyr" and "Std_SA32". 
STARE nnn FOV_name :: for stare targets; nnn=number of obs cycles, e.g., "STARE 100 ST Tri" (typically time-limited by QUITAT, not by number of obs cycles). BURN FOV_name RA Dec :: for 240-second images in V and I only; no FOV file necessary, e.g., "BURN FF Lyr 12:00:00 +23:34:45". IMAGE target_name filter_mag_or_sec_string RA Dec :: arbitrary imaging, either in magnitudes for the current instrument, or in explicity exposure times (may be mixed on a single line): *** Magnitudes syntax: "IMAGE New target V=12 B=12.5(2) 12:00:00 +23:34:45" to image New target in V filter (once) at targeted mag 12, and B filter twice at targeted mag 12.5. *** Exposure syntax: "IMAGE New target V=120s B=240s(2) 12:00:00 +23:34:45" to image New target in V filter (once) at 120 seconds, and B filter twice at 240 seconds (exposure times are NOT limited, so be careful!) All text between "IMAGE" and first word including a "=" character will make up the target name. COLOR target_name multiplier RA Dec :: for MP color imaging (mp_color.py), e.g., "COLOR MP_1626 1.1x 21:55:08 +24:24:45". 'x' in multipler is optional but recommended. ----- Legal directives: PLAN plan_id :: starts a plan section and names it. ; comment_text :: semicolon at beginning of cell makes cell a comment only. AFINTERVAL nnn :: autofocus interval in minutes SETS nn :: number of times to repeat all targets, autofocuses, chills, etc AUTOFOCUS :: force autofocus CHILL -nn :: chill the cooler to -nn deg C QUITAT nn:nn :: quit plan at nn:nn UTC WAITUNTIL nn:nn :: wait to start plan (first Set) until nn:nn UTC SKIPFILTER filter_name :: skip filter for following targets; omit filter_name to restore all filters. DOMEOPEN :: open dome (or roll-off roof) now. DOMECLOSE :: close dome (or roll-off roof) now. SHUTDOWN :: perform ACP shutdown of camera and park scope CHAIN plan_id :: chain to next plan BURN target_id RA Dec :: shorthand for IMAGE target_id V=240sec(1) I=240sec(1) RA Dec IMAGE target_id exp_specs RA Dec :: take images of target at RA, Dec; exp_specs define the filters and exposures, e.g., V=12.8 R=120sec(2) I=11(3) where 12.8 is a magnitude, 120sec is 120 seconds, and (2) specifies 2 exposures (and resulting images). COLOR target_id multipler RA Dec :: take MP color sequence defined by COLOR_SEQUENCE_AT_V14. fov_name :: if line begins with none of the above, it's a FOV name and takes filters, exposures, RA, and Dec from its FOV file. """ df = pd.read_excel(excel_path, header=None).dropna(axis=0, how='all').dropna(axis=1, how='all') nrow = len(df) ncol = len(df.columns) parsed_list = [] # nested list, one element per ACP plan. 
this_plan_id = '' an_date_string = str(df.iloc[0, 0]).strip() if int(EARLIEST_AN_DATE) < int(an_date_string) < int(LATEST_AN_DATE): an = Astronight(an_date_string, site_name) else: print('>>>>> STOPPING: an_date_string '" + an_date_string + " ""' SEEMS UNREASONABLE (update LATEST_AN_DATE?).') return plan_list = [] this_plan = None macro_dict = dict() macro_field_keys = ['^' + str(i + 1) for i in range(9)] for irow in range(1, nrow): for icol in range(ncol): cell = df.iloc[irow, icol] if isinstance(cell, str): do_this_cell = True else: do_this_cell = ~np.isnan(cell) if do_this_cell: # Extract and process substrings from this cell: cell_str_as_read = str(cell).strip() cell_str_lower = cell_str_as_read.lower() # Add MACRO directive that stores text in a dict for later use; then continue loop: if cell_str_lower.startswith('macro '): _, macro_key, macro_command = tuple(cell_str_as_read.split(maxsplit=2)) macro_dict[macro_key] = macro_command continue # If first word of command is in macro dict, substitute expanded macro for command. words = cell_str_as_read.split() macro_command = macro_dict.get(words[0], None) if macro_command is not None: macro_misused = False insert_strings = words[1:] for i_key, macro_field_key in enumerate(macro_field_keys): iloc = macro_command.find(macro_field_key) if iloc >= 0: if i_key < len(insert_strings): insert_string = insert_strings[i_key] else: macro_misused = True insert_string = '???' macro_command = macro_command.replace(macro_field_key, insert_string) if macro_misused: print(' >>>>> ERROR: Macro misused in cell \'' + cell_str_as_read + '\'') cell_str_as_read = macro_command cell_str_lower = cell_str_as_read.lower() # refresh this variable. # Handle any comment after first semi-colon: split_str = cell_str_as_read.split(';', maxsplit=1) command = split_str[0].strip() if len(split_str) > 1: comment = split_str[1].rstrip() else: comment = NO_COMMENT_STRING # Determine action type and add action to directive_list: if cell_str_lower.startswith('plan'): if this_plan is not None: plan_list.append(this_plan) # save previous plan, if any this_plan_id = an_date_string + '_' + command[len('plan'):].strip() this_plan = Plan(this_plan_id, comment) elif cell_str_lower.startswith('sets'): set_count = command[len('sets'):].strip() this_plan.directives.append(Directive('sets', {'count': int(set_count)})) elif cell_str_as_read.startswith(';'): this_plan.directives.append(Directive('comment', {'text': comment})) elif cell_str_lower.startswith('afinterval'): minutes = command[len('afinterval'):].strip() this_plan.directives.append(Directive('afinterval', {'minutes': int(minutes)})) elif cell_str_lower.startswith('autofocus'): this_plan.directives.append(Directive('autofocus', {})) elif cell_str_lower.startswith('chill'): tempC = command[len('chill'):].strip() this_plan.directives.append(Directive('chill', {'tempC': float(tempC)})) elif cell_str_lower.startswith('quitat'): hhmm_utc = command[len('quitat'):].strip().replace(':', '') this_plan.directives.append(Directive('quitat', {'utc': hhmm_utc})) elif cell_str_lower.startswith('waituntil'): value = command[len('waituntil'):].strip().replace(':', '') spec_dict = {'sun_degrees': None, 'utc': None} # overwrite one of these, just below. 
if float(value) < 0: spec_dict['sun_degrees'] = float(value) else: spec_dict['utc'] = value this_plan.directives.append(Directive('waituntil', spec_dict)) elif cell_str_lower.startswith('skipfilter'): value = command[len('skipfilter'):].strip() if cell_str_lower.startswith('skipfilters'): value = command[len('skipfilters'):].strip() # deprecated SKIPFILTERS (plural) case skipfilter_list = [item.strip() for item in value.split()] this_plan.directives.append(Directive('skipfilter', {'filters': skipfilter_list})) elif cell_str_lower.startswith('domeopen'): this_plan.directives.append(Directive('domeopen', {})) elif cell_str_lower.startswith('domeclose'): this_plan.directives.append(Directive('domeclose', {})) elif cell_str_lower.startswith('shutdown'): this_plan.directives.append(Directive('shutdown', {})) elif cell_str_lower.startswith('chain'): next_plan_filename = 'plan_' + an_date_string + '_' + \ command[len('chain'):].strip().upper() if not next_plan_filename.endswith('.txt'): next_plan_filename += '.txt' this_plan.directives.append(Directive('chain', {'filename': next_plan_filename})) elif cell_str_lower.startswith('burn'): value = command[len('burn'):].strip() this_fov_name, ra_string, dec_string = extract_ra_dec(value) # this_fov_name, ra_string, dec_string = tuple(value.rsplit(maxsplit=2)) this_plan.directives.append(Directive('burn', {'fov_name': this_fov_name.strip(), 'ra': ra_string.strip(), 'dec': dec_string.strip(), 'force_autoguide': True})) elif cell_str_lower.startswith('stare'): value = command[len('stare'):].strip() repeats, this_fov_name = tuple(value.split(maxsplit=1)) this_plan.directives.append(Directive('stare', {'fov_name': this_fov_name.strip(), 'repeat_count': int(repeats), 'force_autoguide': True})) elif cell_str_lower.startswith('image'): value = command[len('image'):].strip() force_autoguide, value = get_and_remove_option(value, FORCE_AUTOGUIDE_TOKEN) subvalue, ra_string, dec_string = extract_ra_dec(value) # subvalue, ra_string, dec_string = tuple(value.rsplit(maxsplit=2)) filter_entries = [] target_name = "WARNING: NO TARGET NAME" while True: if len(subvalue) <= 0: print(">>>>> WARNING: No target name for command '" + cell_str_as_read + "'.") break if len(subvalue.split()) == 1: target_name = subvalue break subsubvalue, item = subvalue.rsplit(maxsplit=1) is_filter_entry = '=' in item if is_filter_entry: filter_entries.append(item.strip()) else: target_name = subvalue break subvalue = subsubvalue filter_entries.reverse() if len(filter_entries) >= 1: this_plan.directives.append(Directive('image', {'target_name': target_name, 'filter_entries': filter_entries, 'ra': ra_string, 'dec': dec_string, 'force_autoguide': force_autoguide})) elif cell_str_lower.startswith('color'): value = command[len('color'):].strip() force_autoguide, value = get_and_remove_option(value, FORCE_AUTOGUIDE_TOKEN) # ignored. subvalue, ra_string, dec_string = extract_ra_dec(value) target_name, multiplier_string = tuple(subvalue.rsplit(maxsplit=1)) multiplier_string = multiplier_string.lower().split('x')[0] multiplier = float(multiplier_string) entries = tuple([(filt, multiplier * exp14, repeats) for (filt, exp14, repeats) in COLOR_SEQUENCE_AT_V14]) this_plan.directives.append(Directive('color', {'target_name': target_name, 'entries': entries, 'multiplier_string': multiplier_string, 'ra': ra_string, 'dec': dec_string, 'force_autoguide': COLOR_FORCE_AUTOGUIDE, 'comment': comment} # store comment for color. 
                        ))
                else:  # Anything else we treat as a fov_name:
                    value = command  # use the whole command string (before comment); no directive string.
                    force_autoguide, value = get_and_remove_option(value, FORCE_AUTOGUIDE_TOKEN)
                    fov_name = value.strip()
                    if len(fov_name) >= 2:
                        this_plan.directives.append(Directive('fov',
                                                              {'fov_name': fov_name,
                                                               'force_autoguide': force_autoguide}))
    plan_list.append(this_plan)  # Ensure we save the last plan.
    return plan_list, an


def get_and_remove_option(string, option):
    """ From a value string (e.g., 'IMAGE MP_191 AG+ Clear=200sec(1) 12:34:45 -06:34:21') and
    an option string (e.g., 'AG+'), determine whether option is in value, and return value string
    with all instances of option token (space-bounded) removed, for further processing.
    Used in parse_excel().
    :param string: value string of directive. [string]
    :param option: option string to locate. [string]
    :return: (flag value, value string with option removed). [2-tuple of (boolean, string)]
    """
    p = ' ' + string + ' '
    pu = p.upper()
    pu_option = ' ' + option.upper() + ' '
    flag = (pu.find(pu_option) >= 0)
    while True:
        i = pu.find(pu_option)
        if i == -1:
            break
        pu = pu[:i + 1] + pu[i + len(pu_option):]
        p = p[:i + 1] + p[i + len(pu_option):]
    return flag, p.strip()


def reorder_directives(plan_list):
    """ Puts directives within each Plan object in the desired order, returns the updated plan list.
    :param plan_list: the plan list whose directives are to be reordered [list of Plan objects].
    :return: the plan list with reordered directives [list of Plan objects].
    """
    # Directives within each sublist retain user's given order.
    ideal_directive_ordering = [['quitat'], ['afinterval'], ['sets'],
                                ['waituntil', 'chill', 'stare', 'fov', 'burn', 'image', 'color',
                                 'autofocus', 'comment', 'skipfilter', 'domeopen', 'domeclose'],
                                ['shutdown'], ['chain']]
    for plan in plan_list:
        reordered_directive_list = []
        for directive_order_sublist in ideal_directive_ordering:
            for i_directive in range(len(plan.directives)):
                this_directive = plan.directives[i_directive]
                if this_directive.type.lower() in directive_order_sublist:
                    reordered_directive_list.append(this_directive)
        num_omitted = len(plan.directives) - len(reordered_directive_list)  # count before overwriting.
        plan.directives = reordered_directive_list
        if num_omitted > 0:
            print('>>>>> WARNING: ' + str(num_omitted) + ' actions in plan ' + plan.plan_id +
                  ' were omitted during ordering.')
    return plan_list


def make_events(plan_list, instrument, fov_dict, an, exp_time_factor):
    """ Translate user's directives into executable events (to be repeated if more than one set).
    For simplicity, handle all directives, even if not enough plan time to complete them all (common).
    Compute event durations here, but postpone creation of full plan timeline to later function.
    :param plan_list: list of Plan objects, as returned by parse_excel(). [list of Plan objects]
    :param instrument: instrument data. [Instrument object]
    :param fov_dict: dictionary of FOV objects, keyed by fov name. [dict]
    :param an: the Astronight being planned. [Astronight object]
    :param exp_time_factor: user-supplied multiplier of exposure time from nominal. [float]
    :return: [nothing--it modifies plan_list in place].
    """
    for plan in plan_list:
        skipfilter_list = []  # default
        # For each directive: make event and add it to plan's event list:
        for directive in plan.directives:
            if directive.type == 'waituntil':  # NB there may be >1 waituntil, but only 1 active quitat.
if directive.spec['sun_degrees'] is not None: sun_degrees = directive.spec['sun_degrees'] site_obs = ephem.Observer() site_obs.lat, site_obs.lon = str(an.site.latitude), str(an.site.longitude) site_obs.elevation = an.site.elevation sun = ephem.Sun(site_obs) site_obs.horizon = str(sun_degrees) utc_end = site_obs.previous_setting(sun, an.local_middark_utc) \ .datetime().replace(tzinfo=timezone.utc) this_summary_text = 'WAITUNTIL sun reaches ' + \ '{0:g}'.format(sun_degrees) + u'\N{DEGREE SIGN}' + ' alt' this_acp_entry = ['#WAITUNTIL 1, ' + '{0:g}'.format(sun_degrees) + ' ; deg sun alt'] else: hhmm = ('0' + directive.spec['utc'])[-4:] utc_end = an.datetime_utc_from_hhmm(hhmm) formatted_time = '{:%m/%d/%Y %H:%M}'.format(utc_end) this_summary_text = 'WAITUNTIL ' + hhmm + ' utc' this_acp_entry = ['#WAITUNTIL 1, ' + formatted_time + ' ; utc'] this_event = Event('waituntil', this_summary_text, this_acp_entry) this_event.utc_end = utc_end plan.events.append(this_event) elif directive.type == 'chill': this_summary_text = 'CHILL ' + '{0:g}'.format(directive.spec['tempC']) this_acp_entry = ['#CHILL ' + '{0:g}'.format(directive.spec['tempC'])] this_event = Event('chill', this_summary_text, this_acp_entry, CHILL_DURATION) this_event.setpoint = directive.spec['tempC'] plan.events.append(this_event) elif directive.type == 'stare': n_repeats = directive.spec['repeat_count'] fov_name = directive.spec['fov_name'] this_summary_text = 'Stare ' + str(n_repeats) + ' repeats at ' + fov_name if directive.spec['force_autoguide'] is True: this_summary_text += ' AG+' exp_data = make_fov_exposure_data(fov_name, an, fov_dict, instrument, exp_time_factor=exp_time_factor, skipfilter_list=skipfilter_list, force_autoguide=directive.spec['force_autoguide']) if exp_data is None: return # fail filters, counts, exp_times, target_overhead, repeat_duration = exp_data event_duration = target_overhead + n_repeats * repeat_duration duration_comment = str(round(repeat_duration / 60.0, 1)) + ' min/repeat --> ' + \ str(round(event_duration / 60.0, 1)) + ' min (nominal)' this_fov = fov_dict[fov_name] this_acp_entry = [';', '#REPEAT ' + str(n_repeats) + ';', '#DITHER 0 ;', '#FILTER ' + ','.join(filters) + ' ;', '#BINNING ' + ','.join(len(filters) * ['1']) + ' ;', '#COUNT ' + ','.join([str(c) for c in counts]) + ' ;', '#INTERVAL ' + ','.join([str(e).split('.0')[0] for e in exp_times]) + ' ; ' + duration_comment, ';----' + this_fov.acp_comments, fov_name + '\t' + ra_as_hours(this_fov.ra) + '\t' + dec_as_hex(this_fov.dec)] if directive.spec['force_autoguide'] is True: this_acp_entry.insert(1, '#AUTOGUIDE ; Automatic for stare target') duration_dict = {'target_overhead': target_overhead, 'repeat_count': n_repeats, 'counts': counts, 'exp_times': exp_times} this_event = Event('stare', this_summary_text, this_acp_entry, event_duration, duration_dict, ra=ra_as_hours(this_fov.ra), dec=dec_as_hex(this_fov.dec)) this_event.target_name = fov_name plan.events.append(this_event) elif directive.type == 'fov': fov_name = directive.spec['fov_name'] this_summary_text = fov_name if directive.spec['force_autoguide'] is True: this_summary_text += ' AG+' exp_data = make_fov_exposure_data(fov_name, an, fov_dict, instrument, exp_time_factor=exp_time_factor, skipfilter_list=skipfilter_list, force_autoguide=directive.spec['force_autoguide']) if exp_data is None: return # fail filters, counts, exp_times, target_overhead, repeat_duration = exp_data event_duration = target_overhead + 1 * repeat_duration duration_comment = ' --> ' + str(round(event_duration / 60.0, 
1)) + ' min' this_fov = fov_dict[fov_name] this_acp_entry = [';', '#DITHER 0 ;', '#FILTER ' + ','.join(filters) + ' ;', '#BINNING ' + ','.join(len(filters) * ['1']) + ' ;', '#COUNT ' + ','.join([str(c) for c in counts]) + ' ;', '#INTERVAL ' + ','.join([str(e).split('.0')[0] for e in exp_times]) + ' ; ' + duration_comment, ';----' + this_fov.acp_comments, fov_name + '\t' + ra_as_hours(this_fov.ra) + '\t' + dec_as_hex(this_fov.dec)] if directive.spec['force_autoguide'] is True: this_acp_entry.insert(1, '#AUTOGUIDE ; Forced') duration_dict = {'target_overhead': target_overhead, 'repeat_count': 1, 'counts': counts, 'exp_times': exp_times} this_event = Event('fov', this_summary_text, this_acp_entry, event_duration, duration_dict, ra=ra_as_hours(this_fov.ra), dec=dec_as_hex(this_fov.dec)) this_event.target_name = fov_name plan.events.append(this_event) elif directive.type == 'burn': future_fov_name = directive.spec['fov_name'] ra = directive.spec['ra'] dec = directive.spec['dec'] this_summary_text = 'BURN ' + future_fov_name + ' ' + ra + ' ' + dec if directive.spec['force_autoguide'] is True: this_summary_text += ' AG+' this_acp_entry = [';', '#DITHER 0 ;', '#FILTER V,I ;', '#BINNING 1,1 ;', '#COUNT 1,1 ;', '#INTERVAL ' + str(BURN_EXPOSURE) + ',' + str(BURN_EXPOSURE) + ' ;----> BURN for new FOV file.', future_fov_name + '\t' + ra + '\t' + dec + ' ;'] if directive.spec['force_autoguide'] is True: this_acp_entry.insert(1, '#AUTOGUIDE ; Automatic for burn target') target_overhead, repeat_duration = tabulate_target_durations( filters=['V', 'I'], counts=[1, 1], exp_times=[BURN_EXPOSURE, BURN_EXPOSURE], force_autoguide=True) event_duration = target_overhead + 1 * repeat_duration duration_dict = {'target_overhead': event_duration - 2 * BURN_EXPOSURE, 'repeat_count': 1, 'counts': [1, 1], 'exp_times': 2 * [BURN_EXPOSURE]} this_event = Event('burn', this_summary_text, this_acp_entry, event_duration, duration_dict, ra=ra, dec=dec) this_event.target_name = future_fov_name plan.events.append(this_event) elif directive.type == 'image': target_name = directive.spec['target_name'] filter_entries = directive.spec['filter_entries'] ra = directive.spec['ra'] dec = directive.spec['dec'] filters, counts, exp_times, target_overhead, repeat_duration = \ make_image_exposure_data(filter_entries, instrument, exp_time_factor=exp_time_factor, force_autoguide=directive.spec['force_autoguide']) event_duration = target_overhead + 1 * repeat_duration this_summary_text = 'Image ' + target_name +\ ' ' + ' '.join([f + '=' + '{0:g}'.format(e) + 's(' + str(c) + ')' for (f, e, c) in zip(filters, exp_times, counts)]) +\ ' ' + ra + ' ' + dec if directive.spec['force_autoguide'] is True: this_summary_text += ' AG+' duration_comment = ' --> ' + str(round(event_duration / 60.0, 1)) + ' min' this_acp_entry = [';', '#DITHER 0 ;', '#FILTER ' + ','.join(filters) + ' ;', '#BINNING ' + ','.join(len(filters) * ['1']) + ' ;', '#COUNT ' + ','.join([str(c) for c in counts]) + ' ;', '#INTERVAL ' + ','.join([str(round(e, 1)).split('.0')[0] for e in exp_times]) + ' ; ' + duration_comment, ';---- from IMAGE directive -----', target_name + '\t' + ra + '\t' + dec] if directive.spec['force_autoguide'] is True: this_acp_entry.insert(1, '#AUTOGUIDE ; Forced') duration_dict = {'target_overhead': target_overhead, 'repeat_count': 1, 'counts': counts, 'exp_times': exp_times} this_event = Event('image', this_summary_text, this_acp_entry, event_duration, duration_dict, ra=ra, dec=dec) this_event.target_name = target_name plan.events.append(this_event) elif 
directive.type == 'color': target_name = directive.spec['target_name'] entries = directive.spec['entries'] ra = directive.spec['ra'] dec = directive.spec['dec'] filters, counts, exp_times, target_overhead, repeat_duration = \ make_color_exposure_data(entries, force_autoguide=True) event_duration = target_overhead + 1 * repeat_duration this_summary_text = 'Color ' + target_name + \ ' ' + directive.spec['multiplier_string'] + \ 'x ' + '{0:.1f}'.format(event_duration / 60.0) + ' min.' + \ ' ' + ra + ' ' + dec if directive.spec['force_autoguide'] is True: this_summary_text += ' AG+' if directive.spec['comment'] != NO_COMMENT_STRING: this_summary_text += ' ; ' + directive.spec['comment'] duration_comment = ' --> ' + str(round(event_duration / 60.0, 1)) + ' min' this_acp_entry = [';', '#DITHER 0 ;', '#FILTER ' + ','.join(filters) + ' ;', '#BINNING ' + ','.join(len(filters) * ['1']) + ' ;', '#COUNT ' + ','.join([str(c) for c in counts]) + ' ;', '#INTERVAL ' + ','.join([str(round(e, 1)).split('.0')[0] for e in exp_times]) + ' ; ' + duration_comment, ';---- from COLOR directive -----', target_name + '\t' + ra + '\t' + dec] if directive.spec['force_autoguide'] is True: this_acp_entry.insert(1, '#AUTOGUIDE ; Forced') duration_dict = {'target_overhead': target_overhead, 'repeat_count': 1, 'counts': counts, 'exp_times': exp_times} this_event = Event('color', this_summary_text, this_acp_entry, event_duration, duration_dict, ra=ra, dec=dec) this_event.target_name = target_name plan.events.append(this_event) elif directive.type == 'autofocus': this_summary_text = 'AUTOFOCUS' this_acp_entry = [';', '#AUTOFOCUS'] event_duration = AUTOFOCUS_DURATION this_event = Event('autofocus', this_summary_text, this_acp_entry, event_duration) plan.events.append(this_event) elif directive.type == 'comment': comment_text = directive.spec['text'] this_summary_text = ';' + comment_text this_acp_entry = [';' + comment_text] event_duration = 0 this_event = Event('comment', this_summary_text, this_acp_entry, event_duration) plan.events.append(this_event) elif directive.type == 'skipfilter': new_skipfilter_list = directive.spec['filters'] if len(new_skipfilter_list) == 0: skipfilter_list_text = 'none' else: skipfilter_list_text = ' '.join(new_skipfilter_list) this_summary_text = 'SKIPFILTER ' + skipfilter_list_text this_acp_entry = [';', '; (skipfilter: ' + skipfilter_list_text + ')'] skipfilter_list = new_skipfilter_list # changing this state variable event_duration = 0 this_event = Event('skipfilter', this_summary_text, this_acp_entry, event_duration) plan.events.append(this_event) elif directive.type == 'domeopen': this_summary_text = 'DOMEOPEN' this_acp_entry = [';', '#DOMEOPEN'] event_duration = DOME_OPEN_TIME this_event = Event('domeopen', this_summary_text, this_acp_entry, event_duration) plan.events.append(this_event) elif directive.type == 'domeclose': this_summary_text = 'DOMECLOSE' this_acp_entry = [';', '#DOMECLOSE'] event_duration = DOME_CLOSE_TIME this_event = Event('domeclose', this_summary_text, this_acp_entry, event_duration) plan.events.append(this_event) elif directive.type == 'shutdown': this_summary_text = 'SHUTDOWN' this_acp_entry = [';', '#SHUTDOWN'] event_duration = SHUTDOWN_DURATION this_event = Event('shutdown', this_summary_text, this_acp_entry, event_duration) plan.events.append(this_event) elif directive.type == 'quitat': plan.utc_quitat = an.datetime_utc_from_hhmm(directive.spec['utc']) elif directive.type == 'afinterval': plan.afinterval = float(directive.spec['minutes']) elif directive.type == 
'sets': plan.sets_requested = int(directive.spec['count']) elif directive.type == 'chain': plan.chain_destination = directive.spec['filename'] else: print(">>>>> ERROR: in plan", plan.plan_id, ', directive', directive.type, 'not understood.') def make_timeline(plan_list, an, earliest_hhmm): # TODO: SHUTDOWN needs repair, to make it function & stop (1) in mid-plan, (2) even with SETS. # For now, put #SHUTDOWN in its own (last) plan. # Initialize times & intervals to state before first plan: utc_running = None if earliest_hhmm is not None: utc_running = an.datetime_utc_from_hhmm(earliest_hhmm) else: utc_running = an.datetime_utc_from_hhmm('0000') + timedelta(hours=AN_START_REL_UTC_0000) if utc_running > an.ts_dark.start: utc_running -= timedelta(hours=24) utc_most_recent_autofocus = utc_running - timedelta(days=1000) # keep python happy with a prev value. shutdown_performed = False current_chill_setpoint = None for plan in plan_list: plan.utc_start = utc_running no_plan_exposures_yet_encountered = True for i_set in range(1, plan.sets_requested + 1): # i_set = 1 to sets_requested, inclusive. # skipfilter_list = [] # reset at beginning of set execution. for event in plan.events: # First, do autofocus if AFINTERVAL since latest autofocus has passed or at plan startup: # TODO: rewrite (move?) this, so that long Stare & Image events can have > 1 autofocus. if plan.afinterval is not None: minutes_since_last_autofocus = \ (utc_running - utc_most_recent_autofocus).total_seconds() / 60.0 if event.type in ['burn', 'stare', 'fov', 'image', 'color']: if minutes_since_last_autofocus > plan.afinterval or \ no_plan_exposures_yet_encountered: # Perform AFINTERVAL autofocus: utc_running += timedelta(seconds=AUTOFOCUS_DURATION) utc_most_recent_autofocus = utc_running plan.afinterval_autofocus_count += 1 if plan.sets_requested == 1: event.summary_text += ' (af)' if plan.quitat_reached_at(utc_running): break # if quitat time reached during afinterval autofocus, do not run event. utc_start_event = utc_running # Store event's actual end time (incl quitat if active): if event.type == 'waituntil': # WAITUNTIL only works in first set (set 1): if i_set == 1: if plan.utc_quitat is not None: utc_end_event = min(event.utc_end, plan.utc_quitat) # not later than QUITAT. 
else: utc_end_event = event.utc_end # But definitely not before utc_running (time goes not backward): utc_end_event_actual = max(utc_end_event, utc_running) elif event.type in ['comment', 'skipfilter']: utc_end_event_actual = utc_start_event # zero duration elif event.type in ['shutdown', 'autofocus', 'domeopen', 'domeclose']: utc_end_event_actual = utc_start_event + timedelta(seconds=event.duration_total) elif event.type == 'chill': if event.setpoint != current_chill_setpoint: utc_end_event_actual = utc_start_event + timedelta(seconds=event.duration_total) current_chill_setpoint = event.setpoint elif event.type in ['burn', 'stare', 'fov', 'image', 'color']: actual_duration, event_autofocus_count, utc_most_recent_autofocus = \ event.calc_actual_duration(utc_start_event, plan.utc_quitat, plan.afinterval, utc_most_recent_autofocus) if event_autofocus_count >= 1: event.summary_text += ' (' + str(event_autofocus_count) + ' af)' plan.afinterval_autofocus_count += event_autofocus_count utc_end_event_actual = utc_start_event + timedelta(seconds=actual_duration) no_plan_exposures_yet_encountered = False else: print('make_timeline() doesn\'t recognize event type"" ' + event.type) # Store event's summary display time (hhmm on summary line, usually for set 1): if i_set == 1: event.utc_summary_display = utc_start_event # Update event's minimum altitude (all sets, target event types only): if event.type in ['burn', 'stare', 'fov', 'image', 'color']: this_lower_alt = event.calc_lower_altitude(an, utc_start_event, utc_end_event_actual) if event.min_altitude is None: event.min_altitude = this_lower_alt else: event.min_altitude = min(event.min_altitude, this_lower_alt) # Store event's status: if event.type == 'chain': event.status = 'CHAIN' elif plan.quitat_reached_at(utc_running): event.status = 'QUITAT' elif utc_start_event < an.ts_dark.start or utc_end_event_actual > an.ts_dark.end: event.status = 'LIGHT' elif event.type in ['burn', 'stare', 'fov', 'image', 'color', 'autofocus', 'chill']: event.status = str(i_set) # default elif event.type in ['shutdown', 'waituntil', 'domeopen', 'domeclose']: event.status = 'ok' else: event.status = '' # Finally, update master clock at end of this event: utc_running = utc_end_event_actual # For SHUTDOWN, signal end of entire run: if event.type == 'shutdown': shutdown_performed = True break # out of event loop (to next set). # Stop events if shutdown run or quitat reached: if plan.quitat_reached_at(utc_running) or shutdown_performed: break # out of event loop (to next set, which will also stop) # Quit set if shutdown run or quitat reached: if plan.quitat_reached_at(utc_running) or shutdown_performed: break # out of set loop (to next plan) plan.sets_completed = i_set # Finish any end-of-plan business (incl saving statistics): plan.utc_end = utc_running # Quit plan if shutdown run or quitat reached: if shutdown_performed: break # out of plan loop to end of timeline. 
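    # Note on the loop above: utc_running is the single master clock for the whole night.
    # Each event advances it by that event's actual (possibly QUITAT-truncated) duration,
    # and QUITAT or SHUTDOWN simply stops further advancement, so plan.utc_start/utc_end
    # and each event's status record the night as it is expected to unfold.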
# Finish end-of-night business (or could go outside this function, instead): pass def make_acp_plan_files(plan_list, an, output_directory, exp_time_factor): # First, delete old ACP plan files: filenames = os.listdir(output_directory) for filename in filenames: if filename.startswith("plan_") and filename.endswith(".txt"): fullpath = os.path.join(output_directory, filename) os.remove(fullpath) # Then, make an ACP-format plan file for each plan: for plan in plan_list: if plan.plan_comment is None: plan_comment = '' else: plan_comment = '; ' + plan.plan_comment # noinspection PyListCreation plan_acp_lines = ['; ACP PLAN ' + plan.plan_id + plan_comment, '; as generated by photrix at ' + '{:%Y-%m-%d %H:%M UTC}'.format(datetime.now(timezone.utc)), '; using exposure time factor = ' + '{:5.3f}'.format(exp_time_factor)] plan_acp_lines.append(an.acp_header_string()) # Add SETS ACP directive if one exists: if plan.sets_requested > 1: plan_acp_lines.extend([';', '#SETS ' + str(int(plan.sets_requested))]) # Add QUITAT ACP directive if one exists: if plan.utc_quitat is not None: formatted_time = '{:%m/%d/%Y %H:%M}'.format(plan.utc_quitat) plan_acp_lines.extend([';', '#QUITAT ' + formatted_time + ' ; utc']) # Add AFINTERVAL ACP directive if one exists: if plan.afinterval is not None: if plan.afinterval > 0: plan_acp_lines.append('#AFINTERVAL ' + '{0:g}'.format(plan.afinterval)) if plan.utc_quitat is not None or plan.afinterval is not None: plan_acp_lines.append(';') # Add event lines: for event in plan.events: plan_acp_lines.extend(event.acp_lines) # Add CHAIN ACP directive if one exists: if plan.chain_destination is not None: plan_acp_lines.extend([';', '#CHAIN ' + plan.chain_destination]) # Write this ACP plan file: filename = 'plan_' + plan.plan_id + '.txt' output_fullpath = os.path.join(output_directory, filename) print('PRINT plan ' + plan.plan_id) with open(output_fullpath, 'w') as this_file: this_file.write('\n'.join(plan_acp_lines)) def make_summary_file(plan_list, fov_dict, an, output_directory, exp_time_factor): # First, delete old summary files: filenames = os.listdir(output_directory) for filename in filenames: if filename.startswith("Summary_") and filename.endswith(".txt"): fullpath = os.path.join(output_directory, filename) os.remove(fullpath) # Unpack summary_lines: an_year = int(an.an_date_string[0:4]) an_month = int(an.an_date_string[4:6]) an_day = int(an.an_date_string[6:8]) day_of_week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']\ [datetime(an_year, an_month, an_day).weekday()] header_lines = ['SUMMARY for AN' + an.an_date_string + ' ' + day_of_week.upper() + ' (site = ' + an.site.name + ')', ' as generated by photrix at ' + '{:%Y-%m-%d %H:%M UTC}'.format(datetime.now(timezone.utc)), ' using exposure time factor = ' + '{:5.3f}'.format(exp_time_factor) + ' min.alt = ' + '{:.1f}'.format(an.site.min_altitude) + u'\N{DEGREE SIGN}', an.acp_header_string(), '\n'] moon_is_a_factor = an.moon_phase > MOON_PHASE_NO_FACTOR # for this astronight radec_dict = dict() # collector to find and warn against Image events of same target w/ diff RA,Dec. 
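    # Layout of each summary line built by make_summary_line() below:
    #   <status, right-justified to 8 chars>  <hhmm + utc day indicator>  <min altitude>  <summary text>
    # so a completed target row reads roughly '      ok 0312  41 MP_191 ...' (values illustrative).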
# Local function: def make_summary_line(status_text, hhmm_text, utc_day_indicator, min_altitude, summary_text): if status_text is None: status_text = '' if hhmm_text is None: hhmm_text = 4 * ' ' if utc_day_indicator is None: utc_day_indicator = ' ' if min_altitude is not None: altitude_text = str(int(round(min_altitude))) else: altitude_text = ' ' if status_text == '1': status_text = 'ok' return ' '.join([status_text.rjust(8), hhmm_text + utc_day_indicator, altitude_text, summary_text]) # END local function. # Construct summary_lines for every event: for i_plan, plan in enumerate(plan_list): # Add lines to top of plan summary: plan.summary_pre_lines.append( make_summary_line(None, None, None, None, 60 * '-')) hhmm_start = hhmm_from_datetime_utc(plan.utc_start) hhmm_end = hhmm_from_datetime_utc(plan.utc_end) if i_plan == 0: display_start = 'dusk to ' else: display_start = hhmm_start + '-' plan.summary_pre_lines.append( make_summary_line(None, None, None, None, 'Begin Plan ' + plan.plan_id + ' :: ' + display_start + hhmm_end + ' utc')) if plan.plan_comment is not None: if len(plan.plan_comment.strip()) > 0: plan.summary_pre_lines.append( make_summary_line(None, None, None, None, plan.plan_comment)) if i_plan > 0: plan.summary_pre_lines.append( make_summary_line(None, hhmm_start, None, None, 'Plan entered.')) if plan.sets_requested > 1: plan.summary_pre_lines.append( make_summary_line(None, None, None, None, 'SETS ' + '{0:g}'.format(plan.sets_requested))) if plan.utc_quitat is not None: plan.summary_pre_lines.append( make_summary_line(None, None, None, None, 'QUITAT ' + hhmm_from_datetime_utc(plan.utc_quitat) + ' utc')) if plan.afinterval is not None: plan.summary_pre_lines.append( make_summary_line(None, None, None, None, 'AFINTERVAL ' + '{0:g}'.format(plan.afinterval))) # Add lines to end of plan summary: if plan.chain_destination is not None: plan.summary_post_lines.append( make_summary_line('CHAIN', hhmm_end, ' ', None, 'Chain to \'' + plan.chain_destination + '\'')) if plan.afinterval is not None: plan.summary_post_lines.append( make_summary_line(None, None, None, None, str(plan.afinterval_autofocus_count) + ' AFINTERVAL autofocuses done.')) plan.summary_post_lines.append('\n') for event in plan.events: # Add warning line if time wasted by waiting to start this plan (prev plan ended early). 
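            # (The check below applies only from the second plan onward; the gap is measured
            # from this plan's start time to the WAITUNTIL end time, and anything over one
            # minute is reported as wasted waiting time.)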
if event.type == 'waituntil': if i_plan > 0: gap_minutes = (event.utc_end - plan.utc_start).total_seconds() / 60.0 if gap_minutes > 1.0: plan.summary_pre_lines.append( ' >>>>>>>>>> WARNING: WAITUNTIL gap = ' + str(int(gap_minutes)) + ' minutes.') # Construct main summary text line for this event, write into its Event object: if event.type in ['waituntil', 'comment', 'skipfilter']: hhmm_text, utc_day_indicator = ' ', ' ' else: if event.utc_summary_display is not None: hhmm_text = hhmm_from_datetime_utc(event.utc_summary_display) if event.utc_summary_display < an.datetime_utc_from_hhmm('0000'): utc_day_indicator = '-' elif event.utc_summary_display > an.datetime_utc_from_hhmm('0000') + timedelta(days=1): utc_day_indicator = '+' else: utc_day_indicator = ' ' else: utc_day_indicator = ' ' if event.type in ['fov', 'stare', 'image', 'color', 'burn', 'autofocus', 'chill']: if event.status is None: event.status = 'SKIPPED' hhmm_text = None utc_day_indicator = None event.min_altitude = None summary_text_line = make_summary_line(event.status, hhmm_text, utc_day_indicator, event.min_altitude, event.summary_text) event.summary_lines = [summary_text_line] # Add warning line if Image event whose RA,Dec differs from previous Image event of same target. if event.type == 'image': previous_list = radec_dict.get(event.target_name, None) this_dict_value = (event.ra, event.dec) if previous_list is None: radec_dict[event.target_name] = [this_dict_value] # start list & skip warning. else: if any([v != this_dict_value for v in previous_list]): event.summary_lines.append( ' >>>>>>>>>> WARNING: ' + ' Previous Image entry for ' + event.target_name + ' has different RA, Dec.') radec_dict[event.target_name].append(this_dict_value) # add value to list. # Add warning line if moon is too close to this object and moon is up: if moon_is_a_factor: if event.type in ['burn', 'image', 'color', 'fov', 'stare']: moon_dist = an.moon_radec.degrees_from(RaDec(event.ra, event.dec)) # in degrees if moon_dist < MIN_MOON_DEGREES_DEFAULT: if event.utc_summary_display is not None: if not (an.ts_dark_no_moon.start <= event.utc_summary_display <= an.ts_dark_no_moon.end): event.summary_lines.append( ' >>>>>>>>>> WARNING: ' + event.target_name + ' MOON DISTANCE = ' + str(int(round(moon_dist))) + u'\N{DEGREE SIGN}' + ', vs. min ' + str(MIN_MOON_DEGREES_DEFAULT) + u'\N{DEGREE SIGN}') # Add warning line if fov target is estimated too faint in V: if event.type == 'fov': this_fov = fov_dict[event.target_name] if this_fov.observing_style.lower() == 'lpv': mags = this_fov.estimate_lpv_mags(an.local_middark_jd) v_mag = mags.get('V', None) if v_mag is not None: if v_mag >= V_MAG_WARNING: event.summary_lines.append( ' >>>>>>>>>> WARNING: above target estim. 
V Mag ~ ' + '{:.2f}'.format(v_mag) + ' very faint (>=' + '{0:g}'.format(V_MAG_WARNING) + ').') # Add warning line if autofocus and more than one sets requested (causing too many autofocuses): if event.type == 'autofocus' and plan.sets_requested > 1: event.summary_lines.append( ' >>>>>>>>>> WARNING: autofocus not recommended when sets > 1.') if plan.chain_destination is not None: # Add plan warning line if plan chains to itself: if plan.chain_destination.lower() == 'plan_' + plan.plan_id.lower() + '.txt': plan.end_warning_lines.append( ' >>>>>>>>>> ERROR: this plan attempts to chain to itself.') # Add plan warning line if chained-to plan does not exist: elif i_plan != len(plan_list) - 1: if plan.chain_destination.lower() != \ ('plan_' + plan_list[i_plan + 1].plan_id + '.txt').lower(): plan.end_warning_lines.append( ' >>>>>>>>>> ERROR: this plan attempts to chain,' ' but not to next plan.') # Add plan warning if no autofocus (or afinterval) given: if plan.afinterval is None and all([e.type != 'autofocus' for e in plan.events]): if any([e.type in ['burn', 'image', 'fov', 'stare', 'color'] for e in plan.events]): plan.end_warning_lines.append( ' >>>>>>>>>> WARNING: this plan has no autofocus or afinterval.') # Add plan warning if autofocus and afinterval) both in same plan: if plan.afinterval is not None and any([e.type == 'autofocus' for e in plan.events]): if any([e.type in ['burn', 'image', 'fov', 'stare', 'color'] for e in plan.events]): plan.end_warning_lines.append( ' >>>>>>>>>> WARNING: this plan has both autofocus and afinterval.') # Construct file contents by appending all required text lines: all_summary_lines = header_lines for plan in plan_list: all_summary_lines.extend(plan.summary_pre_lines) for event in plan.events: all_summary_lines.extend(event.summary_lines) all_summary_lines.extend(plan.end_warning_lines) all_summary_lines.extend(plan.summary_post_lines) # Write Summary file: output_fullpath = os.path.join(output_directory, 'Summary_' + an.an_date_string + '.txt') print('PRINT summary to ', output_fullpath) with open(output_fullpath, 'w') as this_file: this_file.write('\n'.join(all_summary_lines)) def make_fov_exposure_data(fov_name, an, fov_dict=None, instrument=None, exp_time_factor=1, skipfilter_list=[], force_autoguide=None): """ Calculates exposure data for ONE REPEAT of one given fov. :param fov_name: [string] :param an: [Astronight object] :param fov_dict: :param instrument: instrument data [Instrument object] :param exp_time_factor: :param skipfilter_list: list of filter names to omit from this FOV observation [list of strings]. 
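    :param force_autoguide: True iff autoguiding is to be forced for this target; must be a
        boolean, not None. [boolean]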
:return: tuple: (filters [list of str], counts [list of int], exp_times [list of float], target_overhead [float], repeat_duration [float]) """ if fov_dict is not None: # this_fov = fov_dict[fov_name] this_fov = fov_dict.get(fov_name, None) if this_fov is None: print(' >>>>> ERROR: FOV file not found for \'' + fov_name + '\'') return else: this_fov = Fov(fov_name) if not isinstance(instrument, Instrument): print(" >>>>> ERROR: make_fov_exposure_data() parm 'instrument' must be " + "a valid Instrument object") return None if force_autoguide is None: print(" >>>>> ERROR in make_fov_exposure_data(): force_autoguide is None but must be boolean.") return None obs_style = this_fov.observing_style filters = [] counts = [] exp_times = [] mags = dict() omit_list = [f.lower() for f in skipfilter_list] for obs in this_fov.observing_list: filter, mag, count = obs if filter.lower().strip() not in omit_list: filters.append(filter) counts.append(count) if obs_style.lower() in ['standard', 'monitor', 'stare']: exp_time = calc_exp_time(mag, filter, instrument, this_fov.max_exposure, exp_time_factor=exp_time_factor) elif obs_style.lower() == 'lpv': if len(mags) == 0: mags = this_fov.estimate_lpv_mags(an.local_middark_jd) # dict (get on 1st obs only) exp_time = calc_exp_time(mags[filter], filter, instrument, this_fov.max_exposure, exp_time_factor=exp_time_factor) else: print('****** WARNING: fov \'' + fov_name + '\' has unrecognized observing style \'' + obs_style + '\'.') return None exp_times.append(exp_time) if obs_style.lower() != 'stare': counts, exp_times = repeat_short_exp_times(counts, exp_times) target_overhead, repeat_duration = tabulate_target_durations(filters, counts, exp_times, force_autoguide=force_autoguide) # return types (3 lists, two floats): [str], [int], [float], float, float return filters, counts, exp_times, target_overhead, repeat_duration def make_image_exposure_data(filter_entries, instrument, exp_time_factor=1, force_autoguide=None): """ Calculates exposure data for given user-defined target ("IMAGE" directive). :param exp_time_factor: user-supplied multiplier of exp time from nominal, usually 0.5-1. [float] :param filter_entries: list of exposure-defining strings, as ['I=12','V=13(3)'], where I and V are filter names, 12 and 13 are target magnitudes, and (3) is an image count (=1 if absent). :param instrument: the instrument for which these exposures are wanted. [Instrument object] :param force_autoguide: True iff user wants to force autoguiding for this target. [boolean] :return: tuple of equal-length lists: (filters [str], counts [int], exp_times [float]) """ if force_autoguide is None: print(" >>>>> ERROR in make_image_exposure_data(): force_autoguide is None but must be boolean.") return None filters = [] counts = [] exp_times = [] for entry in filter_entries: this_filter, this_mag, this_count = None, None, None raw_filter, mag_string = entry.split("=", maxsplit=1) this_filter = raw_filter.strip() bits = mag_string.split("(") if len(bits) == 1: # case e.g. "V=13.2" this_count = 1 elif len(bits) == 2: # case e.g. "V=13.2(1)" try: this_count = int(bits[1].replace(")", "")) except ValueError: # print(' >>>> PARSING ERROR:', entry) raise ValueError(' >>>> PARSING ERROR (probably the number of repeats): ' + entry) # TODO: I'm not crazy about the next if-statement's condition. 
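        # Two accepted forms for the value before '(' (handled just below):
        #   'V=13.2(3)' -> a magnitude of 13.2; exposure time is then computed by calc_exp_time().
        #   'V=120s(3)' -> an explicit exposure time of 120 seconds; a value containing 's'
        #                  bypasses the magnitude-based calculation.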
if 's' in bits[0].lower(): this_exp_time = float(bits[0].lower().split('s')[0]) else: this_mag = float(bits[0]) this_exp_time = calc_exp_time(this_mag, this_filter, instrument, max_exp_time=None, exp_time_factor=exp_time_factor) filters.append(this_filter) counts.append(this_count) exp_times.append(this_exp_time) counts, exp_times = repeat_short_exp_times(counts, exp_times) target_overhead, repeat_duration = tabulate_target_durations(filters, counts, exp_times, force_autoguide=force_autoguide) # return types (3 lists, two floats): [str], [int], [float], float, float return filters, counts, exp_times, target_overhead, repeat_duration def make_color_exposure_data(entries, force_autoguide=True): filters = [e[0] for e in entries] exp_times = [e[1] for e in entries] counts = [e[2] for e in entries] target_overhead, repeat_duration = tabulate_target_durations(filters, counts, exp_times, force_autoguide=force_autoguide) return filters, counts, exp_times, target_overhead, repeat_duration def tabulate_target_durations(filters, counts, exp_times, force_autoguide): aggregate_exposure = sum([counts[i] * exp_times[i] for i in range(len(counts))]) guiding_is_active = force_autoguide or aggregate_exposure > MAX_AGGREGATE_EXPOSURE_NO_GUIDING # TODO: get some of the next base values from instrument object. target_overhead = NEW_TARGET_DURATION + (GUIDE_STAR_ACQUISITION if guiding_is_active else 0) repeat_duration = aggregate_exposure + \ len(filters) * NEW_FILTER_DURATION + \ sum(counts) * NEW_EXPOSURE_DURATION_EX_GUIDER_CHECK + \ (sum(counts) * GUIDER_CHECK_DURATION if guiding_is_active else 0) return target_overhead, repeat_duration def repeat_short_exp_times(counts, exp_times): for i in range(len(counts)): if counts[i] * exp_times[i] < MIN_TOTAL_EXP_TIME_PER_FILTER: counts[i] = ceil(MIN_TOTAL_EXP_TIME_PER_FILTER / exp_times[i]) return counts, exp_times def calc_exp_time(mag, filter, instrument, max_exp_time, exp_time_factor=1): # Raw exposure time from mag + properties of instrument (camera & filters). exp_time_from_mag = instrument.filter_data[filter]['reference_exposure_mag10'] * \ 10.0 ** ((mag - 10.0) / 2.5) # Apply exposure time factor (from user, for this night) (before asymptotes and limits): exp_time = exp_time_factor * exp_time_from_mag # Apply absolute maximum as soft asymptote: exp_time = sqrt(1.0 / (1.0 / exp_time ** 2 + 1.0 / ABSOLUTE_MAX_EXPOSURE_TIME ** 2)) # Apply absolute minimum as soft asymptote: # as of 20170406, absolute minimum is from this module, not necessarily from instrument object. # i.e., use more stringent of the two minima. effective_minimum = max(ABSOLUTE_MIN_EXPOSURE_TIME, instrument.camera['shortest_exposure']) exp_time = sqrt(exp_time ** 2 + effective_minimum ** 2) # Apply rounding (at least 2 significant digits): if exp_time >= 10.0: exp_time = round(exp_time, 0) # round to nearest second else: exp_time = round(exp_time, 1) # round to nearest 0.1 second # Apply fov's hard maximum: if max_exp_time is not None: exp_time = min(max_exp_time, exp_time) return exp_time def extract_ra_dec(value_string): """ Split value string into subvalue, ra, dec, whether ra and dec are in standard hex format (e.g., 12:34:56 -11:33:42) or in TheSkyX format (e.g., 06h 49m 40.531s +63° 00' 06.920"). :param value_string: input string from parse_excel(), as above. :return: 3-tuple: subvalue (= everything but RA and Dec), ra, dec. [3-tuple of strings]. 
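    Example (illustrative): 'MP_191 12:34:45 -06:34:21' returns ('MP_191', '12:34:45', '-06:34:21');
        a TheSkyX-format string returns the same three fields with RA and Dec normalized to
        colon-separated form.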
""" split_string = tuple(value_string.rsplit(maxsplit=2)) if split_string[1].endswith('\'') and split_string[2].endswith('\"'): # RA and Dec are in TheSkyX format, e.g., 06h 49m 40.531s +63° 00' 06.920": split_string = tuple(value_string.rsplit(maxsplit=6)) if len(split_string) != 7: raise SyntaxError('Cannot parse apparent TheSkyX-format RA-Dec string: ', value_string) subvalue = split_string[0] ra_items = [s.replace('h', '').replace('m', '').replace('s', '').strip() for s in split_string[1:4]] ra = ':'.join(ra_items) dec_items = [s.replace('°', '').replace('\'', '').replace('"', '').strip() for s in split_string[4:7]] dec = ':'.join(dec_items) else: # RA and Dec are given directly in std hex: if len(split_string) != 3: raise SyntaxError('Cannot parse apparent hex-format RA-Dec string ' + split_string) subvalue, ra, dec = split_string return subvalue, ra, dec __________PLANEWAVE_MOUNT_POINT_LIST______________________________________________ = 0 def make_planewave_point_list(n_pts, summer_winter, latitude=32.9, longitude=-105.5, fraction_extra_near_ecliptic=0.25): """Make mount model point list for Planewave L-series mount, points being in rational order. Minimizes dome movement. Begins near zenith, then makes one circuit North, East, South, West, North. Each line is one point in degrees: az, alt. May well return 10-12 points more than requested in n_pts. :param n_pts: [int] :param summer_winter: 'summer' or 'winter', to locate ecliptic. [string] :param latitude: scope latitude in degrees. [float] :param longitude: scope longitude in degrees, negative West. [float] :param fraction_extra_near_ecliptic (limited to [0 to 0.5]). [float] :return: None. Prints list of (az, alt) tuples, in degrees """ min_pts = 70 if n_pts < min_pts: raise ValueError('At least ' + str(min_pts) + ' points needed for proper mount model.') if summer_winter.lower() not in ['winter', 'summer']: raise ValueError('Parameter summer_winter must be \'summer\' or \'winter\'.') seed(2022) # Block_a: points around zenith, for stable start in case az polar alignment is off. # Block_b1: points ringing 30-35 degrees alt. # Block_b2: points between 35 and 70 degrees. # Block_c: extra points around ecliptic (nb: winter vs summer cases). n_pts_block_a = 10 n_pts_block_c = round(max(0.0, min(0.5, fraction_extra_near_ecliptic)) * n_pts) n_pts_blocks_b = n_pts - n_pts_block_a - n_pts_block_c n_pts_block_b1 = round(0.4 * n_pts_blocks_b) n_pts_block_b2 = n_pts - n_pts_block_a - n_pts_block_b1 - n_pts_block_c print('\nblock points =', str(n_pts_block_a), '(' + str(n_pts_block_b1) + ', ' + str(n_pts_block_b2) + ')', str(n_pts_block_c)) # Make block a (around zenith): az_spacing = 360.0 / n_pts_block_a block_az = [(az_spacing * (i + 0.5) + uniform(-5, 5)) % 360.0 for i in range(n_pts_block_a)] block_alt = [75.0 + uniform(-3, 3) for i in range(n_pts_block_a)] block_a_points = list(zip(block_az, block_alt)) block_a_points.sort(key=lambda x: x[0]) # sort by increasing azimuth (N,E,S,W). 
# Make block b1 (30-35 alt): az_spacing_b1 = 360.0 / n_pts_block_b1 az_b1 = [(az_spacing_b1 * (i + 0.5) + uniform(-5, 5)) % 360.0 for i in range(n_pts_block_b1)] alt_b1 = [30.0 + uniform(0, 5) for i in range(n_pts_block_b1)] block_b1_points = [(az, alt) for (az, alt) in zip(az_b1, alt_b1)] # Make block b2 (35-70 alt): az_spacing_b2 = 360.0 / n_pts_block_b2 az_b2 = [(az_spacing_b2 * (i + 0.5) + uniform(-5, 5)) % 360.0 for i in range(n_pts_block_b2)] alt_b2 = [35.0 + uniform(0, 35.0) for i in range(n_pts_block_b2)] block_b2_points = [(az, alt) for (az, alt) in zip(az_b2, alt_b2)] # Block C: band around ecliptic: if summer_winter.lower() == 'summer': dec_center, dec_width = -10, 20 # center declination of ecliptic, total width of band to sample. else: dec_center, dec_width = 20, 30 location = EarthLocation(lon=longitude * u.degree, lat=latitude * u.degree) time = Time.now() # actual time is irrelevant so long as it's constant, as we search all RA values. block_c_points = [] ra_deg = 0.0 while len(block_c_points) < n_pts_block_c: # Space points in RA more or less evenly: ra_deg = (ra_deg + uniform(12.0, 18.0)) % 360.0 dec_deg = dec_center + uniform(-dec_width / 2.0, dec_width / 2.0) sc = SkyCoord(ra=ra_deg * u.degree, dec=dec_deg * u.degree, frame='icrs') altaz = sc.transform_to(AltAz(obstime=time, location=location)) az, alt = altaz.az.degree % 360.0, altaz.alt.degree if alt >= 30.0: block_c_points.append(tuple([az, alt])) # Remove block b and c points too low around celestial pole: # TODO: move this process to astropy2022. max_abs_hourangle_degrees = 6 * 15 # i.e., RA hours either side of meridian. min_abs_cos_hourangle = cos(max_abs_hourangle_degrees / DEGREES_PER_RADIAN) block_bc_points = block_b1_points + block_b2_points + block_c_points # Formulae (1) and (2) from http://star-www.st-and.ac.uk/~fv/webnotes/chapter7.htm: # phi=latitude, delta=declination, H=hourangle, A=target azimuth, a=target altitude. cos_phi, sin_phi = cos(latitude / DEGREES_PER_RADIAN), sin(latitude / DEGREES_PER_RADIAN) block_bc_points_to_keep = [] for (az, alt) in block_bc_points: cos_a, sin_a = cos(alt / DEGREES_PER_RADIAN), sin(alt / DEGREES_PER_RADIAN) cos_A = cos(az / DEGREES_PER_RADIAN) # (1) sin(δ) = sin(a) sin(φ) + cos(a) cos(φ) cos(A) sin_delta = (sin_a * sin_phi) + (cos_a * cos_phi * cos_A) cos_delta = sqrt(1.0 - sin_delta ** 2) # happily, cosine of declination is always non-negative. # (2) cos(H) = { sin(a) - sin(δ) sin(φ) } / { cos(δ) cos(φ) } cos_H = (sin_a - sin_delta * sin_phi) / (cos_delta * cos_phi) if cos_H > min_abs_cos_hourangle: block_bc_points_to_keep.append((az, alt)) print('point', '{0:.2f}, {1:.2f}'.format(az, alt), 'kept,' ' cos(hourangle) =', '{0:.2f}'.format(cos_H)) else: print('point', '{0:.2f}, {1:.2f}'.format(az, alt), 'removed for hourangle.') block_bc_points = block_bc_points_to_keep # Sample block b and c points into two equal groups, sort each to make CW then CCW az circuits: seed(2022) shuffle(block_bc_points) # in-place. n_1 = floor(len(block_bc_points) / 2.0) block_bc_1, block_bc_2 = block_bc_points[:n_1], block_bc_points[n_1:] block_bc_1.sort(key=lambda x: +x[0]) # sort by increasing azimuth (N,E,S,W). block_bc_2.sort(key=lambda x: -x[0]) # sort by decreasing azimuth (N,W,S,E). # Construct all_points: all_points = block_a_points + block_bc_1 + block_bc_2 # Last: add extra points whenever az slews are too long and camera might not keep up. max_az_change_for_sync = 20 # degrees; the maximum dome slew for which dome can keep up. 
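    # Worked example of the extra-point logic just below: for an azimuth change of 90 degrees,
    # n_extra_points = ceil((90 - 20) / 36) = 2, so two intermediate points are interpolated
    # between that pair of points so the dome can keep up with the mount.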
az_change_per_extra_point = 36 # degrees; the dome slew expected for a wasted mount slew/image cycle. alt_dither_per_extra_point = 5 # degrees new_all_points = [all_points[0]] for i in range(len(all_points) - 1): d_az_raw = abs(all_points[i+1][0] - all_points[i][0]) d_az = min(d_az_raw, 360.0 - d_az_raw) n_extra_points = ceil((d_az - max_az_change_for_sync) / az_change_per_extra_point) extra_points = [] for i_pt in range(n_extra_points): fraction = 0.5 + 0.5 * (float(i_pt) / float(n_extra_points)) az_extra = Angle(circmean(data=np.array([all_points[i][0], all_points[i + 1][0]]) * u.deg, weights = np.array([1.0 - fraction, fraction]))).\ wrap_at(360 * u.deg).degree # az_extra = (1.0 - fraction) * all_points[i][0] + fraction * all_points[i + 1][0] alt_extra = (1.0 - fraction) * all_points[i][1] + fraction * all_points[i + 1][1] extra_point = (az_extra, alt_extra) print('\n adding extra point', '{0:.2f}, {1:.2f}'.format(extra_point[0], extra_point[1])) print(' between', '{0:.2f}, {1:.2f}'.format(all_points[i][0], all_points[i][1]), 'and', '{0:.2f}, {1:.2f}'.format(all_points[i + 1][0], all_points[i + 1][1])) extra_points.append(extra_point) new_all_points.extend(extra_points) new_all_points.append(all_points[i+1]) # Write points to file: point_lines = ['{0:.2f}, {1:.2f}'.format(az, alt) for az, alt in new_all_points] fullpath = os.path.join(PHOTRIX_ROOT_DIRECTORY, 'point_list_' + summer_winter.lower() + '.txt') print(fullpath) print(str(len(new_all_points)), 'points.') if len(new_all_points) < 4 * n_pts: with open(fullpath, 'w') as this_file: this_file.write('\n'.join(point_lines)) def test_make_pw_point_list(): n_pts = 70 summer_winter = 'summer' make_planewave_point_list(n_pts, summer_winter, latitude=32.9, longitude=-105.5, fraction_extra_near_ecliptic=0.25)
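

def demo_soft_exposure_limits():
    """Illustrative sketch only, not called by the planner: shows how the soft asymptotes
    used in calc_exp_time() pull a raw exposure time toward a maximum and away from zero
    without hard clipping. The limit values below are arbitrary examples chosen for this
    demonstration; they are NOT the module's ABSOLUTE_MAX/MIN_EXPOSURE_TIME constants."""
    from math import sqrt  # local import so this sketch stands alone.
    example_max, example_min = 480.0, 2.0  # seconds; assumed values for illustration only.
    for raw in [0.5, 5.0, 60.0, 600.0, 6000.0]:
        softened = sqrt(1.0 / (1.0 / raw ** 2 + 1.0 / example_max ** 2))  # soft maximum asymptote.
        softened = sqrt(softened ** 2 + example_min ** 2)                 # soft minimum asymptote.
        print('raw exposure', raw, 's  ->  softened', round(softened, 1), 's')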
""" The RepoManager class is the entry point for all actions related to repositories. It manages the list of repos and is responsible for passing requests from the rest of asset management system to the individual repos. """ import os from squirrel.repo.repo import Repo from squirrel.repo.cache import Cache from squirrel.repo import setuprepolist from squirrel.shared import constants from squirrel.shared import setupconfig from squirrel.shared import setuplocalization from squirrel.shared import urilib from squirrel.shared.squirrelerror import SquirrelError # ====================================================================================================================== class RepoManager(object): """ A class to manage multiple repos on disk, and to pass requests down to those repos. """ # ------------------------------------------------------------------------------------------------------------------ def __init__(self, config_p=None, repo_list_p=None, language="english"): """ Initialize the manager object responsible for managing different repos. The actual functionality for individual repos are managed by the repo class. :param config_p: An optional path to a config file. If omitted or None, use either the path given by an env var OR the standard location for the config file. :param repo_list_p: An optional path to a repo list file that identifies which repos are active in the system. If omitted or None, use either the path given by an env var OR the standard location for the repo list file. :param language: The language used for communication with the end user. Defaults to "english". """ self.language = language self.localized_resource_obj = setuplocalization.create_localization_object(language=language) self.config_obj = setupconfig.create_config_object(validation_dict=constants.REPO_CONFIG_SECTIONS, localized_resource_obj=self.localized_resource_obj, config_p=config_p) self.repo_list_obj = setuprepolist.create_repo_list_object(localized_resource_obj=self.localized_resource_obj, repo_list_p=repo_list_p) self.repos = dict() self.default_repo = None self.cache_obj = Cache(config_obj=self.config_obj, localized_resource_obj=self.localized_resource_obj) self.cache_obj.cache_if_needed(self.repos.values()) self._load_repos_from_repos_list() self._load_default_repo() # ------------------------------------------------------------------------------------------------------------------ def disambiguate_uri(self, uri, repo_required=False, path_required=False, name_required=False, name_must_exist=True): """ Given a partial uri, try to create a full, legal, existing uri (this includes the asset name). The format of a full URI is as follows: repo_name://relative/path/to/asset#asset_name The purpose of this function is to encapsulate any ambiguity stemming from allowing the end user to supply an incomplete URI into a single location. All other functions can then require an explicit URI. The rules about dealing with this ambiguity are as follows: 1) If name_required is True, then the partial uri MUST include the full asset name regardless of what other portions are included. If False, then the uri may or may not include an asset name. Asset names are separated from the rest of the URI by the # character. 2) If there is a :/ then the repo name will be drawn from the text in front of the :/. If there is no such :/ string then the repo name will be drawn from the default repo. If there is no default repo, an error is raised. 
3) If there is a / (not including the :/ listed above) then it is assumed that the partial uri refers to either a) a full uri_path to an asset (including the asset name - see #1 above for more details) b) a full uri_path to the parent of an asset (i.e. the full path only without the asset name) or c) a partial uri_path that contains the first N number of path elements. Partial uri paths may not begin in the middle of a path. They must always begin at the root of the URI path. If none of these are the case, then an error is raised. 4) If there is no / (not including the :/ listed above) then it is assumed that the partial uri is an asset name only without a path. An attempt will be made to find that asset name. If it does not exist, or exists more than once, an error is raised. 5) If any of the above rules result in more than one asset being returned, or no assets being returned, an error is raised. Examples: reponame://uri/path/including#asset_name <- Complete URI reponame://uri/path/including <- Full URI path, without asset name reponame://uri/path <- Partial URI path, without asset name reponame:/#asset_name <- Repo name and asset name, no URI path. Note the single / reponame:/asset_name <- Repo name and asset name, no URI path. Note the single / and the missing # /uri/path/including/#asset_name <- Full URI path, including asset name, no repo name /uri/path/including <- Full URI path, no asset name, no repo name /uri/path <- Partial URI path, no asset name, no repo name #asset_name <- Asset name only, no repo name, no URI path. asset_name <- Asset name only, no repo name, no URI path. Note the missing # :param uri: The partial or complete uri to process. :param repo_required: If True, then a repo name is required. Defaults to False. :param path_required: If true, then a path is required (may be a partial path). Defaults to False. :param name_required: If True, then the asset name is a required part of the URI. Defaults to False. :param name_must_exist: If True, then the name (if provided) must exist as an existing asset on disk. :return: A three item tuple consisting of the repo, path, and asset name. If any of those are missing and not required, that particular item will be set to "". """ # TODO: Split this up into smaller functions # If they supplied just a blank string (or None), return just the default repo. if not uri: if repo_required or path_required or name_required: required = list() if repo_required: required.append("repo name") if path_required: required.append("URI path") if name_required: required.append("asset name") required_str = ", ".join(required) required_str = ",and ".join(required_str.rsplit(", ", 1)) err_msg = self.localized_resource_obj.get_error_msg(901) err_msg = err_msg.format(required=required_str) raise SquirrelError(err_msg, 901) if self.default_repo is None: err_msg = self.localized_resource_obj.get_error_msg(203) raise SquirrelError(err_msg, 203) repo_n = self.default_repo.repo_n if repo_n not in self.repos.keys(): err_msg = self.localized_resource_obj.get_error_msg(910) err_msg = err_msg.format(name=repo_n) raise SquirrelError(err_msg, 910) return repo_n + ":/#" # Split up the string into a repo and the remaining text. try: repo_n, remaining_str = uri.split(":/", maxsplit=1) except ValueError: # There is no repo name repo_n = "" remaining_str = uri # Split up the remaining_str into the path and the asset name. 
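        # For example (values illustrative): for 'reponame://uri/path#asset_name' the repo split
        # above leaves remaining_str = '/uri/path#asset_name', which becomes
        # uri_path = '/uri/path' and asset_n = 'asset_name'; a bare 'asset_name' (no '/' and
        # no '#') falls through to uri_path = '' and asset_n = 'asset_name'.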
try: uri_path, asset_n = remaining_str.split("#", maxsplit=1) except ValueError: if "/" in remaining_str: uri_path = remaining_str asset_n = "" else: uri_path = "" asset_n = remaining_str # If there is no repo name, but one was required, raise an error if repo_required and not repo_n: err_msg = self.localized_resource_obj.get_error_msg(902) raise SquirrelError(err_msg, 902) # If there is no uri_path, but one was required, raise an error if path_required and not uri_path: err_msg = self.localized_resource_obj.get_error_msg(903) raise SquirrelError(err_msg, 903) # If there is no asset name, but one was required, raise an error if name_required and not asset_n: err_msg = self.localized_resource_obj.get_error_msg(904) raise SquirrelError(err_msg, 904) # If there is no repo name, get the default repo name if not repo_n: repo_n = self.default_repo.repo_n # If the repo name is not the name of a valid repo, raise an error if repo_n not in self.repos.keys(): err_msg = self.localized_resource_obj.get_error_msg(102) err_msg = err_msg.format(repo_name=repo_n) raise SquirrelError(err_msg, 102) # If there is a uri_path, validate it if uri_path: self.cache_obj.validate_uri_path_against_cache(repo_n=repo_n, uri_path=uri_path) # If there is an asset name, validate it if asset_n and name_must_exist: uri_path = self.cache_obj.uri_path_from_asset_name(repo_n=repo_n, asset_n=asset_n) return f"{repo_n}:/{uri_path}#{asset_n}" # ------------------------------------------------------------------------------------------------------------------ def uri_from_asset_path(self, asset_p): """ Given an asset path, return the uri path. :param asset_p: The full path to the asset. :return: A URI. """ return self.cache_obj.uri_from_asset_path(asset_p) # ------------------------------------------------------------------------------------------------------------------ def uri_path_from_asset_path(self, asset_p): """ Given an asset path, return the uri path. :param asset_p: The full path to the asset. :return: A URI path. """ return self.cache_obj.uri_path_from_asset_path(asset_p) # ------------------------------------------------------------------------------------------------------------------ def repo_obj_from_uri(self, uri): """ Given a URI, return a repo object. :param uri: The URI. :return: A repo object. """ if not urilib.validate_uri_format(uri): err_msg = self.localized_resource_obj.get_error_msg(201) err_msg = err_msg.format(uri=uri) raise SquirrelError(err_msg, 201) repo_n = urilib.repo_name_from_uri(uri) try: return self.repos[repo_n] except KeyError: err_msg = self.localized_resource_obj.get_error_msg(304) err_msg = err_msg.format(repo_name=repo_n) raise SquirrelError(err_msg, 304) # ------------------------------------------------------------------------------------------------------------------ def _load_default_repo(self): """ Gets the default repository object. It will first attempt to get it from the user's environment from an env variable. If that variable does not exist, it will attempt to extract it from the repos list file. :return: The default repo object. If there is no default, return None. 
""" try: default_repo_name = os.environ[constants.DEFAULT_REPO] except KeyError: if self.repo_list_obj.has_option("defaults", "default_repo"): default_repo_name = self.repo_list_obj.get_string("defaults", "default_repo") else: return None if not self._name_is_existing_repo(default_repo_name): return None if default_repo_name not in self.repos.keys(): return None self.default_repo = self.repos[default_repo_name] # ------------------------------------------------------------------------------------------------------------------ def _load_repo(self, repo_p): """ Loads a single repo given a path. If the path given does not point to a valid repo dir, an error will be raised. :param repo_p: The path to the repo to load. The path must exist and must be a valid repo. Raises an error if not. :return: Nothing. """ assert type(repo_p) is str if not os.path.exists(repo_p) or not os.path.isdir(repo_p): err_msg = self.localized_resource_obj.get_error_msg(302) err_msg = err_msg.format(repo_path=repo_p) raise SquirrelError(err_msg, 302) repo_obj = Repo(repo_root_d=repo_p, cache_obj=self.cache_obj, config_obj=self.config_obj, localized_resource_obj=self.localized_resource_obj) if not repo_obj.is_repo(): err_msg = self.localized_resource_obj.get_error_msg(301) err_msg = err_msg.format(repo_path=repo_p) raise SquirrelError(err_msg, 301) self.repos[repo_obj.repo_n] = repo_obj # ------------------------------------------------------------------------------------------------------------------ def _load_repos(self, repos): """ Loads all of the repos given in the list repos. Kicks up an error if any are missing or corrupt AND warn_on_load_error OR fail_on_load_error is set to True in the config file. :param repos: A list of full paths to the repos to be loaded. :return: Nothing. """ assert type(repos) is list warn_on_load_error = self.config_obj.get_boolean("repo_settings", "warn_on_load_error") fail_on_load_error = self.config_obj.get_boolean("repo_settings", "fail_on_load_error") for repo_path in repos: try: self._load_repo(repo_p=repo_path) except SquirrelError as e: if e.code in [301, 302]: if fail_on_load_error: err_msg = self.localized_resource_obj.get_error_msg(310) err_msg = err_msg.format(message=str(e)) raise SquirrelError(err_msg, 310) if warn_on_load_error: # <- rely on upstream to check the code and not actually quit. err_msg = self.localized_resource_obj.get_error_msg(311) err_msg = err_msg.format(message=str(e)) raise SquirrelError(err_msg, 311) else: raise # ------------------------------------------------------------------------------------------------------------------ def _load_repos_from_repos_list(self): """ Loads all of the repos listed in the repo list file. Kicks up an error if any are missing or corrupt AND warn_on_load_error OR fail_on_load_error is set to True in the config file. :return: Nothing. """ repo_names = self.repo_list_obj.options("repos") repos_p = list() for repo_name in repo_names: repos_p.append(self.repo_list_obj.get_string("repos", repo_name)) try: self._load_repos(repos_p) except SquirrelError as e: if e.code != 311: raise # ------------------------------------------------------------------------------------------------------------------ # TODO: Also remove repo data from cache def unload_repo(self, repo_n): """ Unloads a single repo. :param repo_n: The name of the repo to unload. :return: Nothing. 
""" assert type(repo_n) is str and repo_n if repo_n not in self.repos.keys(): err_msg = self.localized_resource_obj.get_error_msg(102) err_msg = err_msg.format(repo_name=repo_n) raise SquirrelError(err_msg, 102) del(self.repos[repo_n]) # ------------------------------------------------------------------------------------------------------------------ def _name_is_existing_repo(self, repo_n): """ Given a repo name, does a very minimal check to see if the repo is valid. Basically checks to see if the name is in the list of loaded repos. :param repo_n: The name of the repo we are testing. :return: True if the repo is valid. False otherwise. """ assert type(repo_n) is str return repo_n in self.repos.keys() # ------------------------------------------------------------------------------------------------------------------ def _update_repo_list_file(self, purge=False): """ Updates the repo list file to reflect the current state of all loaded repos. :param purge: If true, repos listed in the repo list file that do not exist on disk will be removed. If False, then these repos will be left in the file. Defaults to False. :return: Nothing. """ assert type(purge) is bool repos = dict() for key, value in self.repos.items(): repos[key] = value.repo_root_d if purge: self.repo_list_obj.replace_section("repos", repos) else: self.repo_list_obj.merge_section("repos", repos) self.repo_list_obj.replace_section("defaults", {"default_repo": self.default_repo.repo_n}) # ------------------------------------------------------------------------------------------------------------------ def save_repo_list_file(self): """ Saves the repo list file with all of the changes made during the current session (by add_repo, make_repo). :return: Nothing. """ self._update_repo_list_file(purge=True) self.repo_list_obj.save() # ------------------------------------------------------------------------------------------------------------------ def bless_repo(self, repo_p): """ Given a path to a repo, blesses all of the directories within the root path. :param repo_p: The repo path. :return: Nothing. """ # Create a temporary repo object that points to this path. if not os.path.isdir(repo_p): err_msg = self.localized_resource_obj.get_error_msg(302) err_msg = err_msg.format(repo_path=repo_p) raise SquirrelError(err_msg, 302) repo_obj = Repo(repo_root_d=repo_p, cache_obj=self.cache_obj, config_obj=self.config_obj, localized_resource_obj=self.localized_resource_obj) repo_obj.bless_repo() # ------------------------------------------------------------------------------------------------------------------ def make_repo(self, repo_d): """ Given a path, creates a new repo out of this path by blessing the directory structure. Automatically adds the new repo to the list of existing repos. :param repo_d: The path to the directory structure that should be made into a repo. :return: Nothing. 
""" if not os.path.isdir(repo_d): err_msg = self.localized_resource_obj.get_error_msg(100) err_msg = err_msg.format(dir=repo_d) raise SquirrelError(err_msg, 100) repo_obj = Repo(repo_root_d=repo_d, cache_obj=self.cache_obj, config_obj=self.config_obj, localized_resource_obj=self.localized_resource_obj) # Make sure the repo name is not already taken if repo_obj.repo_n in self.repos.keys() and repo_d != self.repos[repo_obj.repo_n].repo_root_d: err_msg = self.localized_resource_obj.get_error_msg(303) err_msg = err_msg.format(repo_name=repo_d) raise SquirrelError(err_msg, 303) self.repos[repo_obj.repo_n] = repo_obj self.bless_repo(repo_obj.repo_n) # ------------------------------------------------------------------------------------------------------------------ # TODO: Also cache this repo def add_repo(self, repo_d): """ Adds the repo to the dictionary of repos as well as to the config file. :param repo_d: The path to the repository. Must be a blessed root path. Note: if the name of this directory is the same as an existing repo, an error will be raised. :return: Nothing. """ assert os.path.isdir(repo_d) repo_obj = Repo(repo_root_d=repo_d, cache_obj=self.cache_obj, config_obj=self.config_obj, localized_resource_obj=self.localized_resource_obj) # Make sure the repo name is not already taken if repo_obj.repo_n in self.repos.keys() and repo_d != self.repos[repo_obj.repo_n].repo_root_d: err_msg = self.localized_resource_obj.get_error_msg(303) err_msg = err_msg.format(repo_name=repo_d) raise SquirrelError(err_msg, 303) # Make sure the path points to a valid, blessed repo if not repo_obj.is_repo(): err_msg = self.localized_resource_obj.get_error_msg(301) err_msg = err_msg.format(repo_path=repo_d) raise SquirrelError(err_msg, 301) self.repos[repo_obj.repo_n] = repo_obj # ------------------------------------------------------------------------------------------------------------------ def list_repos(self): """ Returns a list containing all of the repo objects. :return: A list of repo objects. """ return list(self.repos.values()) # ------------------------------------------------------------------------------------------------------------------ def list_broken_repos(self): """ Returns a dictionary where the key is the name of the repo that is missing from disk, and the value is the path where this repo should have been. :return: a dictionary where the key is the name of the repo that is missing from disk, and the value is the path where this repo should have been. """ repo_names = self.repo_list_obj.options("repos") output = dict() for repo_name in repo_names: if repo_name not in self.repos.keys(): output[repo_name] = self.repo_list_obj.get_string("repos", repo_name) return output # ------------------------------------------------------------------------------------------------------------------ def set_default_repo(self, repo_n): """ Sets the default repository. :param repo_n: The name of the default repo. This repo must exist in the list of current repos. :return: """ assert type(repo_n) is str and repo_n # The target repo must exist in the list of repos if repo_n not in self.repos.keys(): err_msg = self.localized_resource_obj.get_error_msg(102) err_msg = err_msg.format(repo_name=repo_n) raise SquirrelError(err_msg, 102) self.default_repo = self.repos[repo_n].repo_n # ------------------------------------------------------------------------------------------------------------------ def cache_repo(self, uri): """ Given a URI, build a cache for that repo. 
:param uri: The uri that contains the repo name. :return: Nothing. """ repo_n = urilib.repo_name_from_uri(uri) repo_obj = self.repos[repo_n] self.cache_obj.cache_repo(repo_obj=repo_obj) # ------------------------------------------------------------------------------------------------------------------ def cache_all_repos(self): """ Cache all repos. :return: Nothing. """ self.cache_obj.cache_all_repos(self.repos.values())
# Send the maintenance report from your Gmail account.
# "appPass" must be a Gmail app password, not the regular account password.
import smtplib


def send_mail(mailFrom, mailTo, appPass):
    # creates SMTP session
    s = smtplib.SMTP('smtp.gmail.com', 587)

    # start TLS for security
    s.starttls()

    # Authentication
    s.login(mailFrom, appPass)

    # read the report that will become the mail body
    with open('/opt/projetmaster-master/logs/report') as report:
        body = report.read()

    # message to be sent
    message = "\r\n".join([
        "From: PIOT",
        "To: " + mailTo,
        "Subject: Maintenance Report",
        "",
        body
    ])

    # sending the mail
    s.sendmail(mailFrom, mailTo, message)

    # terminating the session
    s.quit()
    return 0
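
# Hedged usage sketch for send_mail() above. The addresses and the app password
# are placeholders; a real Gmail "app password" is required for s.login() to
# succeed. Left commented out so importing this module never sends mail:
#
# send_mail(mailFrom="piot.reports@gmail.com",
#           mailTo="admin@example.com",
#           appPass="abcd efgh ijkl mnop")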
import streamlit as st import streamlit.components.v1 as components import gca_requests as gl import datetime import urllib.parse import pandas as pd import gca_main_functions as mf def gca_page_2(): st.title("GOOGLE CALENDARS ANALYTICS PAGE 2") st.write(f'## 2 GOOGLE CALENDAR SPECIFIC ANALYSIS (1 CALENDAR)') st.sidebar.write(f'#### 2 GOOGLE CALENDAR SPECIFIC ANALYSIS (1 CALENDAR)') input_calendar_name = st.sidebar.selectbox('2 SELECT CALENDAR:', mf.list_calendar_names) input_calendar_id = [x["id"] for x in mf.list_calendar if x["summary"] == input_calendar_name][0] input_dates_analyze = st.sidebar.date_input("2_b SELECT RANGE OF DATES TO ANALYZE", [datetime.date(2019, 1, 1), datetime.date.today()], key="spe_di") input_calendar_specific_type = st.sidebar.selectbox('2_b SELECT EVENTS TYPE:', ["HOURS EVENTS", "DAYS EVENTS"], key="spe_sb") # Visualize events of selected calendar input_calendar_events = gl.retrieve_calendar_events_by_id(input_calendar_id) if len(input_calendar_events) == 0: st.write("No values in this calendar.") else: #TODO Filter before dataframe # df_events = filter_by_dates(input_calendar_events, input_dates_analyze, input_calendar_specific_type) df_events = pd.DataFrame(input_calendar_events) st.write(f'### **2_a_I** List of events in "{input_calendar_name}" calendar') st.dataframe(df_events) st.write(f'### **2_a_II** "{input_calendar_name}" calendar visualization') input_calendar_id_number = urllib.parse.quote(input_calendar_id) components.iframe( f"https://calendar.google.com/calendar/embed?src={input_calendar_id_number}&ctz=Europe%2FMadrid", width=1200, height=800, scrolling=True) if input_calendar_specific_type == "HOURS EVENTS": mf.specific_analysis(df_events, input_calendar_events, input_calendar_name, "dateTime", "%Y-%m-%dT%H:%M:%S%z", "hours", input_dates_analyze) elif input_calendar_specific_type == "DAYS EVENTS": mf.specific_analysis(df_events, input_calendar_events, input_calendar_name, "date", "%Y-%m-%d", "days", input_dates_analyze) st.sidebar.write(f"---") st.write(f"---")
# Generated by Django 3.0.8 on 2020-09-16 18:21 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('trip_places', '0010_auto_20200916_1810'), ('trip_places', '0010_auto_20200916_1620'), ] operations = [ ]
import argparse import textwrap with open('args.txt', 'w') as fp: fp.write('-f\nbar') parser = argparse.ArgumentParser(fromfile_prefix_chars='@') parser.add_argument('-f') print(parser.parse_args(['-f', 'foo', '@args.txt'])) parser.print_help()
import datetime
import os
from pygame import mixer
import time

os.system("python3 loading.py")
now = datetime.datetime.now()
fmt = "%Y-%m-%d-%H"


def readr():
    # list the saved notes, then print the chosen one
    os.system("ls notes/*.txt")
    file_name = input("Type your file from the list above ")
    with open(file_name, "r") as text:
        print(text.read())


def readp():
    # list the mp3 files, then play the chosen one
    os.system("ls *.mp3")
    song = input("Type song name (mp3 only) ")
    mixer.init()
    mixer.music.load(song)
    mixer.music.set_volume(0.7)
    mixer.music.play()


while True:
    start = input("Type s to start a new entry, r to read, p to play music (mp3 only), or c to stop.")
    if start == "s":
        name = input("name")
        # open(..., "w") creates the file under notes/ if it does not exist yet
        entry = input("Type your Entry. ")
        with open("notes/" + name, "w") as note:
            note.write(entry)
    if start == "r":
        readr()
    if start == "p":
        readp()
    if start == "c":
        exit("You Quit!")
#! /usr/bin/python """ Skeleton file for a ROS node implementation in this project. """ from dataclasses import dataclass from enum import Enum, auto import sys import sqlite3 import rclpy from rclpy.node import Node from rr_interfaces import msg from load_config import load_configuration from . import get_best from . import math_helper NODE_NAME = "controller" @dataclass class ControllerConfiguration: """ Controller main configuration: values loaded from parameters. """ index: int database: str conveior_width: int conveior_length: int conveior_speed: int arm_span: int arm_pos: [int] arm_pick_time: int arm_drop_time: int arm_speed: int arm_rest_dist: int timer_delay: float debug: bool @dataclass class TakeTime: arm_speed: int arm_span: int rest_dist: int def get_max_time(self, dist): cathetus_a = self.arm_span cathetus_b = dist + self.rest_dist dist = math_helper.pythagoras(cathetus_a, cathetus_b) return math_helper.ceil(dist / self.arm_speed) @dataclass class ReachTime: conv_speed: int arm_pos: int arm_span: int def reach_time(self, curr_loc): min_take_dist = self.arm_pos - self.arm_span distance = min_take_dist - curr_loc return math_helper.ceil(distance / self.conv_speed) def max_reach_time(self): return self.reach_time(0) class ArmState(Enum): """ Current arm action. """ READY = auto() WAITING = auto() WORKING = auto() @classmethod def from_int(cls, i: int): return [cls.READY, cls.WAITING, cls.WORKING][i] @dataclass class LastArmItem: arm_id: int curr_loc: int class ArmInfo: """ Manage the state of the robotic arm: time to reach from conveior begin and the current action. """ def __init__(self, reach_time: ReachTime, take_time: TakeTime): self.reach_time = reach_time self.take_time = take_time self.last_item = None def set_state(self, item_id: int): self.last_item = item_id def is_available(self, pos, cache_dict): if self.last_item is None: return True return self.check_time(pos, cache_dict) def check_time(self, pos, cache_dict): time = self.time_for_last_item(pos, cache_dict) return time < self.reach_time.max_reach_time() def time_for_last_item(self, pos, cache_dict): take_time = self.take_time.get_max_time(pos) if last_item := cache_dict.get(self.last_item): reach_time = self.reach_time.reach_time(last_item.curr_loc) else: reach_time = self.reach_time.max_reach_time() return take_time + reach_time class ArmStats: """ Manage statistics about the robotic arm: information such as the total distance and the number of picked items. """ def __init__(self): self.hits = 0 self.dist = 0 self.temp_dist = 0 def try_add(self, dist): self.temp_dist = dist self.dist += dist self.hits += 1 def untry_add(self): self.dist -= self.temp_dist self.hits -= 1 self.temp_dist = 0 def add_hit(self, dist): self.dist += dist self.hits += 1 @dataclass class ArmChooser: """ Helper class: choose the best arm in order to: 1. Be sure that the item will be taken 2. Ensure load balancing amoung the various arms. 
""" arm_stats: [ArmStats] arm_infos: [ArmInfo] item_cache: dict def choose_best(self, pos): best = get_best.GetBest() for i, (stat, info) in self.__iter_arms__(): if info.is_available(pos, self.item_cache): val = self.run_test(stat, pos) best.update(i, val) return best.get_best() def run_test(self, arm: ArmStats, pos): arm.try_add(pos) output = self.compute_score() arm.untry_add() return output def compute_score(self): hits_var = self.get_score(lambda arm: arm.hits) dist_var = self.get_score(lambda arm: arm.dist) return hits_var + dist_var def get_score(self, func): return math_helper.normed_variance(func(arm) for arm in self.arm_stats) def __iter_arms__(self): return enumerate(zip(self.arm_stats, self.arm_infos)) @dataclass class Controller: """ Handle incoming request and update arm stats. """ arm_stats: [ArmStats] arm_infos: [ArmInfo] item_cache: dict def handle_new_item(self, item_id: int, item_pos: int): chooser = ArmChooser(self.arm_stats, self.arm_infos, self.item_cache) best = chooser.choose_best(item_pos) self.arm_stats[best].add_hit(item_pos) self.arm_infos[best].set_state(item_id) self.item_cache[item_id] = LastArmItem(best, item_pos) return best def update_arm_state(self, state: ArmState, robot_id: int): if state == ArmState.READY: self.arm_infos[robot_id].last_item = None def remove_item(self, item_id): self.item_cache.pop(item_id) def update_location(self, item_id, item_pos): if item := self.item_cache.get(item_id): item.curr_loc = item_pos def make_arm_stat_list(count): return [ArmStats() for _ in range(count)] def controller_factory(conf: ControllerConfiguration): take_time = TakeTime(conf.arm_speed, conf.arm_span, conf.arm_rest_dist) arm_infos = [ ArmInfo(ReachTime(conf.conveior_speed, pos, conf.arm_speed), take_time) for pos in conf.arm_pos ] arm_stats = make_arm_stat_list(len(arm_infos)) return Controller(arm_stats, arm_infos, {}) class ControllerNode(Node): """Empty Node implementation""" def __init__(self): """Basic constructor declaration""" super().__init__(NODE_NAME) self.config = load_configuration(self, ControllerConfiguration) self.controller = controller_factory(self.config) self.conv_sub = self.create_subscription( msg.NewItem, "new_item_topic", self.conveior_state_listener, 50 ) self.arm_sub = self.create_subscription( msg.ArmState, "arm_state_topic", self.arm_state_listener, 50 ) self.pick_item_sub = self.create_subscription( msg.PickItem, "pick_item_topic", self.pick_item_listener, 50 ) self.arm_cmd = self.create_publisher(msg.TakeItem, "take_item_cmd_topic", 50) self.stat_pub = self.create_publisher( msg.ArmStats, "controller_status_topic", 50 ) self.item_loc_sub = self.create_subscription( msg.ItemLocation, "in_reach_topic", self.item_loc_listener, 50 ) self.create_timer(self.config.timer_delay, self.send_controller_status) def item_loc_listener(self, item_loc: msg.ItemLocation): self.controller.update_location(item_loc.item_id, item_loc.item_y) def pick_item_listener(self, pick_item: msg.PickItem): self.controller.remove_item(pick_item.item_id) def conveior_state_listener(self, new_item: msg.NewItem): robot_id = self.controller.handle_new_item(new_item.id, new_item.pos) self.__notify_robots__(new_item.id, robot_id) def arm_state_listener(self, arm_state: msg.ArmState): state = ArmState.from_int(arm_state.state) self.controller.update_arm_state(state, arm_state.robot_id) def send_controller_status(self): for i, arm in enumerate(self.controller.arm_stats): arm_stats = msg.ArmStats() arm_stats.robot_id = i arm_stats.hits = arm.hits arm_stats.dist = arm.dist 
self.stat_pub.publish(arm_stats) self.get_logger().info(f"STAT: {arm_stats}") def is_debug(self): return self.config.debug def get_index(self): return self.config.index def get_db_name(self): return self.config.database def __notify_robots__(self, item_id, robot_id): take_item = msg.TakeItem() take_item.item_id = item_id take_item.robot_id = robot_id self.arm_cmd.publish(take_item) def update_db_entry(database, index, status): if not database: return print(database, type(database)) conn = sqlite3.connect(database) query = f""" UPDATE instances SET status = {status} WHERE id = {index} """ curs = conn.cursor() curs.execute(query) conn.commit() conn.close() def run_node(node): if node.is_debug(): return try: rclpy.spin(node) except KeyboardInterrupt: print("Node arrested") update_db_entry(node.get_db_name(), node.get_index(), 1) except get_best.MissingSelection: print("There are no arms to choose from. Stop now!") update_db_entry(node.get_db_name(), node.get_index(), 2) def main(): """Default entrypoint for ros2 run""" rclpy.init(args=sys.argv) node = ControllerNode() run_node(node) node.destroy_node() rclpy.shutdown() if __name__ == "__main__": main()
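
# Offline sketch (no ROS required) of how the scheduling classes above fit
# together. It only uses classes defined in this module, assumes the relative
# imports (math_helper, get_best, load_config) resolve as in the package, and
# the numbers are made up for illustration. Kept as a comment so it never runs
# alongside the real node:
#
# def _demo_scheduler():
#     take_time = TakeTime(arm_speed=2, arm_span=5, rest_dist=1)
#     arm_infos = [
#         ArmInfo(ReachTime(conv_speed=1, arm_pos=pos, arm_span=5), take_time)
#         for pos in (10, 20, 30)
#     ]
#     controller = Controller(make_arm_stat_list(len(arm_infos)), arm_infos, {})
#     for item_id, pos in [(1, 0), (2, 2), (3, 4)]:
#         robot_id = controller.handle_new_item(item_id, pos)
#         print(f"item {item_id} at y={pos} -> arm {robot_id}")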
from helpers import Distributions from config import Configuration import numpy as np class Gene: inno_tracker = 0 def __init__(self, generation, cause=None, copy=False): self.inno_num = Gene.inno_tracker if not copy: Gene.inno_tracker+=1 self.origin = generation self.last_active = generation self.cause = cause def age(self, generation): return generation - self.origin def __lt__(self, other): return self.inno_num < other.inno_num def __eq__(self, other): return self.inno_num == other.inno_num def __repr__(self): return self.describe() def __hash__(self): return self.inno_num def describe(self): pass class LayerGene(Gene): def __init__(self, generation, cause=None, copy=False): super().__init__(generation, cause, copy) self.type = Configuration.GENE_TYPES['layer'] if not copy: self.expressed = Configuration.DEFAULT_LAYER_EXPRESSED self.activation = Configuration.DEFAULT_ACTIVATION self.incoming = set() #these are for connections self.outgoing = set() self.units = 1 # is_connected checks if other is in the outgoing of self. def is_connected(self, other): if self == other: return True for i in other.incoming: if i in self.outgoing: return True return False def copy(self, maintain_incoming_outgoing=False, **kwargs): clone = LayerGene(generation=self.origin, cause=self.cause, copy=True) clone.inno_num = self.inno_num clone.expressed = self.expressed clone.activation = self.activation clone.incoming = self.incoming.copy() if maintain_incoming_outgoing else set() clone.outgoing = self.outgoing.copy() if maintain_incoming_outgoing else set() clone.units = self.units return clone def describe(self): return '<LayerGene-origin:{},inno_num:{},incoming:{},outgoing:{},expressed:{},activation:\'{}\',cause:\'{}\'>'.format(self.origin,self.inno_num, self.incoming, self.outgoing, self.expressed, self.activation, self.cause) class ConnectionGene(Gene): def __init__(self, generation, source=None, target=None, weight=None, bias=None, cause=None, copy=False): super().__init__(generation, cause, copy) self.type = Configuration.GENE_TYPES['connection'] if not copy: self.source = source self.target = target self.weight = weight if weight else np.random.randn(source.units, target.units) self.bias = bias if bias else np.zeros(target.units) self.expressed = Configuration.DEFAULT_CONNECTION_EXPRESSED def copy(self, maintain_weights=False, **kwargs): clone = ConnectionGene(generation=self.origin, cause=self.cause, copy=True) clone.inno_num = self.inno_num clone.source = self.source clone.target = self.target clone.weight = self.weight if maintain_weights else np.random.randn(self.source.units, self.target.units) clone.expressed = self.expressed return clone def describe(self): return '<ConnectionGene-origin:{},inno_num:{},source:{},target:{},weight:{},expressed:{},cause:\'{}\'>'.format(self.origin, self.inno_num, self.source, self.target, self.weight, self.expressed,self.cause) # def __hash__(self): # return super().__hash__() class InputGene(Gene): def __init__(self, generation, units=None, cause=None, copy=False): super().__init__(generation, cause, copy) self.type = Configuration.GENE_TYPES['input'] if not copy: self.units = units self.expressed = True self.outgoing = set() def copy(self, maintain_outgoing=False): clone = InputGene(generation=self.origin, cause=self.cause, copy=True) clone.units = self.units clone.expressed = self.expressed clone.outgoing = self.outgoing.copy() if maintain_outgoing else set() return clone def is_connected(self, other): if self == other: return True for i in other.incoming: if i in 
self.outgoing: return True return False def __repr__(self): return '<InputGene-outgoing:{},shape:{}>'.format(self.outgoing, self.units) class OutputGene(Gene): def __init__(self, generation, units=None, activation=None, cause=None, copy=False): super().__init__(generation, cause, copy) self.type = Configuration.GENE_TYPES['output'] if not copy: self.units = units self.activation = activation self.expressed = True self.incoming = set() def copy(self, maintain_incoming=False): clone = OutputGene(generation=self.origin, cause=self.cause, copy=True) clone.units = self.units clone.activation = self.activation clone.expressed = self.expressed clone.incoming = self.incoming.copy() if maintain_incoming else set() return clone def __repr__(self): return '<OutputGene-incoming:{},shape:{}>'.format(self.incoming, self.units) class OptimizerGene(Gene): def __init__(self, generation, cause=None, copy=False): super().__init__(generation, cause, copy) self.type = Configuration.GENE_TYPES['optimizer'] if not copy: self.name = Configuration.DEFAULT_OPTIMIZER self.params = Configuration.DEFAULT_OPTIMIZER_PARAMS[self.name] class PseudoGene(Gene): def __init__(self, generation, cause=None, copy=False): super().__init__(generation, cause, copy) self.type = Configuration.GENE_TYPES['pseudo'] if __name__ == '__main__': def test_gene(): generation = 1 gene1 = Gene(generation=generation) gene2 = Gene(generation=generation) assert gene1 < gene2 print('gene test successful') def test_layer_gene(): generation = 1 gene1 = LayerGene(generation=generation) gene2 = LayerGene(generation=generation) gene1_copy = gene1.copy(maintain_bias=True, maintain_incoming_outgoing=False) print(gene1) print('layer gene test successful') def test_connection_gene(): print('connection gene test successful') def test_input_gene(): print('input gene test successful') def test_output_gene(): print('output gene test successful') # def gene_tests(): # generation = 1 # node_gene = NodeGene() # node_gene.initialize(generation) # assert node_gene.origin is generation # generation += 1 # node_gene_2 = NodeGene() # node_gene_2.initialize(generation) # assert node_gene.inno_num + 1 is node_gene_2.inno_num # node_gene_3 = NodeGene() # node_gene_3.initialize(generation) # node_gene_3_copy = node_gene_3.copy() # assert node_gene_3_copy == node_gene_3 # generation += 1 # connection_gene = ConnectionGene() # connection_gene.initialize(0, 2, generation) # assert node_gene_3.inno_num + 1 is connection_gene.inno_num # connection_gene_copy = connection_gene.copy() # assert connection_gene == connection_gene_copy # return True test_gene() test_layer_gene() print('all tests succesful')
#!/usr/bin/env python # Copyright (c) Matscholar Development Team. # Distributed under the terms of the MIT License. from __future__ import print_function, unicode_literals import click from matscholar.cli.mscli_config import set_config from matscholar.collect import ScopusCollector from tabulate import tabulate from operator import itemgetter @click.group() def cli(): """ Welcome to the Matscholar Command Line Interface! To use this cli, configure your settings with `mscli configure` and then contribute abstracts to the project with `mscli contribute` (note: this is only for internal collaborators.) """ pass @click.command("configure") def configure(): """Used to configure Matscholar configuration settings.""" questions = [ { 'type': 'input', 'name': 'MATSCHOLAR_NAME', 'message': 'What is your full name? ', }, { 'type': 'input', 'name': 'MATSCHOLAR_TEXT_MINING_KEY', 'message': 'Enter your Scopus API text mining key ' '(obtained at https://dev.elsevier.com/apikey/manage ) : ', }, { 'type': 'input', 'name': 'MATSCHOLAR_HOST', 'message': 'Enter the hostname of the Matscholar DB: ', }, { 'type': 'input', 'name': 'MATSCHOLAR_USER', 'message': 'Enter your Matscholar username: ', }, { 'type': 'password', 'name': 'MATSCHOLAR_PASSWORD', 'message': 'Enter your Matscholar password: ', }, ] answers = {} for question in questions: answers[question["name"]] = input(question["message"]) set_config(answers) @click.command("contribute") @click.option('--count', default=1, help='number of blocks (default is 1)') def collect(count): """Used to contribute data to Matscholar database. Args: count (int): Number of blocks to process before exiting. """ collector = ScopusCollector() collector.collect(num_blocks=count) @click.command("scoreboard") def scoreboard(): """See how you rank against the Matscholar contributors. """ collector = ScopusCollector() scores = collector.db.build.aggregate([{"$group": {"_id": '$pulled_by', "count": {"$sum": 1}}}]) print(tabulate(sorted([[e["_id"], e["count"]] for e in scores], key=itemgetter(1), reverse=True), headers=['Name', 'Abstracts Contributed'])) cli.add_command(configure) cli.add_command(collect) cli.add_command(scoreboard) def main(): cli()
""" Example how to query tags and keywords from the database. """ from __future__ import print_function, nested_scopes, generators from simdb.databaseModel import * import simdb.databaseAPI as api import collections import numpy as np def print_caption(caption, size=80): print("#" + "=" * (size - 2) + "#") print("#", caption) print("#" + "=" * (size - 2) + "#") def test_list(x, y): """Function to check if two list are the same""" return collections.Counter(x) == collections.Counter(y) def test_dict(x, y): """ Function to check if two dicts are the same """ if set(x.keys()) != set(y.keys()): return False for k in x.keys(): vx = x[k] vy = y[k] # if type(x) != type(y): # return False if isinstance(vx, (collections.Sequence, np.ndarray)): if isinstance(vy, (collections.Sequence, np.ndarray)): if test_list(list(vx),list(vy)): continue else: return False else: return False if x != y: return False return True db_path = 'test.db' engine = create_engine('sqlite:///./'+db_path, echo=False) # if we want spam # Establishing a session Session = sessionmaker(bind=engine) session = Session() query = session.query(Keywords).filter(not_(Keywords.value.is_(None))) print(query.all()) query = session.query(Keywords.name).filter(Keywords.value.is_(None)) print(query.all()) ############################################################################################ # Usefull stuff ############################################################################################ #=============================================================================# # keywords #=========================================================# # all keywords print_caption("all keywords") query = session.query(Keywords.name, Keywords.value)\ .select_from(Keywords)\ .filter(not_(Keywords.value.is_(None))) result = query.all() print(result) print("len: ", len(result)) #=========================================================# # unique tags print_caption("unqiue keywords") query = session.query(Keywords.name, Keywords.value)\ .distinct()\ .filter(not_(Keywords.value.is_(None)))\ result = query.all() unique_keywords = collections.defaultdict(list) for k,v in result: unique_keywords[k].append(v) print(unique_keywords) print("len: ", len(unique_keywords)) import itertools unique_keywords_v2 = dict((k, list(zip(*v))[1]) for k,v in itertools.groupby(result, lambda x: x[0])) print(unique_keywords_v2) print("len: ", len(unique_keywords_v2)) assert test_dict(unique_keywords, unique_keywords_v2) #=========================================================# # OLD VERSION print_caption("OLD api keywords") q = session.query(Keywords) keywords = [e.name for e in q.filter(Keywords.value != None).all()] key_dict = {} for k in np.unique(keywords): key_dict[k] = np.unique([e.value for e in q.filter(Keywords.value != None, Keywords.name == k).all()]) print(key_dict) print("len: ", len(key_dict)) #=========================================================# # unique tags per api print_caption("api keywords") api_keywords = api.get_keywords(db_path=db_path) print(api_keywords) print("len: ", len(api_keywords)) assert test_dict(api_keywords, key_dict) assert test_dict(api_keywords, unique_keywords) #=============================================================================# # tags #=========================================================# # all tags print_caption("all tags") query = session.query(Keywords.name).select_from(Keywords).filter(Keywords.value.is_(None)) all_tags = query.all() print(all_tags) print("len: ", len(all_tags)) 
#=========================================================# # unique tags print_caption("unqiue tags") query = session.query(distinct(Keywords.name)).select_from(Keywords).filter(Keywords.value.is_(None)) result = query.all() unique_tags = list(zip(*result))[0] print(unique_tags) print("len: ", len(unique_tags)) assert len(unique_tags) == len(list(set(unique_tags))), "tags are not unqiue" #=========================================================# # unique tags per api print_caption("api tags") api_tags = api.get_tags(db_path=db_path) print(api_tags) print("len: ", len(api_tags)) assert test_list(api_tags, unique_tags) session.close()
from .html import HtmlParser, ParagraphHtmlSelector, SumulaHtmlSelector, EnciclopediaHtmlSelector
from .iudicium import IudiciumParser
from .pdf import PdfParser

law_parser = HtmlParser(ParagraphHtmlSelector(), 'planalto')
sumula_parser = HtmlParser(SumulaHtmlSelector(), 'stf')
enciclopedia_parser = HtmlParser(EnciclopediaHtmlSelector(), 'puc', True)
iudicium_parser = IudiciumParser()
pdf_parser = PdfParser()

# Full list of MLM parsers; the reassignment below narrows it so that only the
# PDF parser is currently active.
mlm_parsers = [law_parser, sumula_parser, enciclopedia_parser, iudicium_parser, pdf_parser]
mlm_parsers = [pdf_parser]
sts_parsers = []
from setuptools import setup, find_packages

version = '0.4.1'

long_description = """
JSON-RPC Parts is a library of composable components one would need to assemble
a JSON-RPC server or client. The parts provided are JSON-RPC message parser and
serializer, a generic request handler collection, a WSGI-specific request handler
and bits and pieces.

This JSON-RPC Parts collection supports both JSON-RPC v.1.0 and v.2.0, including
"batch" mode for v.2.0.

The parts are split into separate modules that can be used separately from this
collection. Since this collection is MIT-licensed, you are free to grab a part of
this code and use it in almost any project.
"""

project_home = 'http://github.com/dvdotsenko/jsonrpc.py'

if __name__ == "__main__":
    setup(
        name='jsonrpcparts',
        description='JSON-RPC client and server components',
        long_description=long_description,
        version=version,
        author='Daniel Dotsenko',
        author_email='dotsa@hotmail.com',
        url=project_home,
        download_url=project_home + '/tarball/master',
        classifiers=[
            "Development Status :: 4 - Beta",
            "Environment :: Web Environment",
            "Intended Audience :: Developers",
            "License :: OSI Approved :: MIT License",
            "Operating System :: OS Independent",
            "Programming Language :: Python :: 2.6",
            "Programming Language :: Python :: 2.7",
            "Topic :: Internet",
            "Topic :: Internet :: WWW/HTTP",
            "Topic :: Internet :: WWW/HTTP :: WSGI",
            "Topic :: Internet :: WWW/HTTP :: WSGI :: Application"
        ],
        keywords=['JSON', 'jsonrpc', 'rpc', 'wsgi'],
        license='MIT',
        packages=find_packages(),
        include_package_data=True,
        install_requires=['requests']
    )

# Next:
#  python setup.py register
#  python setup.py sdist upload
import typing as t import unittest from itertools import chain import gql_alchemy.schema as s from gql_alchemy.executor import Executor, Resolver, SomeResolver from gql_alchemy.utils import PrimitiveType class ListQuery: def __init__(self, iterable: t.Iterable[t.Any]) -> None: self.__iterable = iterable def attr(self, name: str) -> 'ListQuery': return ListQuery((self.__getattr(i, name) for i in self.__iterable)) def filter(self, cond: t.Callable[[t.Any], bool]) -> 'ListQuery': return ListQuery((i for i in self.__iterable if cond(i))) def eq(self, attr: str, value: t.Any) -> 'ListQuery': return self.filter(lambda i: self.__getattr(i, attr) == value) def neq(self, attr: str, value: t.Any) -> 'ListQuery': return self.filter(lambda i: self.__getattr(i, attr) != value) def map(self, func: t.Callable[[t.Any], t.Any]) -> 'ListQuery': return ListQuery((func(i) for i in self.__iterable)) def keys(self) -> 'ListQuery': return self.map(lambda i: set(i.keys())) def values(self) -> 'ListQuery': return self.map(lambda i: list(i.values())) def select(self, *names: str) -> 'ListQuery': def gen() -> t.Iterable[t.Any]: for i in self.__iterable: new_i = {} for f in names: new_i[f] = self.__getattr(i, f) yield new_i return ListQuery(gen()) def flatten(self) -> 'ListQuery': return ListQuery(chain.from_iterable(self.__iterable)) def list(self) -> t.List[t.Any]: return list(self.__iterable) def set(self) -> t.MutableSet[t.Any]: return set(self.__iterable) def is_empty(self) -> bool: return len(self.list()) == 0 def count(self) -> int: count = 0 for _ in self.__iterable: count += 1 return count @staticmethod def __getattr(item: t.Any, attr: str) -> t.Any: if isinstance(item, dict): return item[attr] return getattr(item, attr) class IntrospectionTest(unittest.TestCase): def query(self, schema: s.Schema, query: str, query_resolver: SomeResolver, mutation_resolver: t.Optional[SomeResolver] = None, variables: t.Optional[t.Mapping[str, PrimitiveType]] = None, op_name: t.Optional[str] = None) -> PrimitiveType: e = Executor(schema, query_resolver, mutation_resolver) return e.query(query, variables if variables is not None else {}, op_name) def test_scalars(self) -> None: class Query(Resolver): foo = "foo" result = self.query( s.Schema( [ ], s.Object("Query", { "foo": s.String, }) ), """ { __schema { types { kind name description fields { name } interfaces { name } possibleTypes { name } enumValues { name } inputFields { name } ofType { name } }}} """, Query() ) scalars = ListQuery([result]).attr("__schema").attr("types").flatten().eq("kind", "SCALAR").list() self.assertEqual( {'SCALAR'}, ListQuery(scalars).attr("kind").set() ) self.assertEqual( {'Boolean', 'Int', 'Float', 'ID', 'String'}, ListQuery(scalars).attr("name").set() ) self.assertEqual( {None}, ListQuery(scalars).select( "fields", "interfaces", "possibleTypes", "enumValues", "inputFields", "ofType" ).values().flatten().set() ) self.assertEqual( {"Standard type"}, ListQuery(scalars).attr("description").set() ) def test_object(self) -> None: class Query(Resolver): foo = "foo" result = self.query( s.Schema( [ ], s.Object("Query", { "foo": s.String, }) ), """ { __schema { types { kind name description fields { name } interfaces { name } possibleTypes { name } enumValues { name } inputFields { name } ofType { name } }}} """, Query() ) objects = ListQuery([result]).attr("__schema").attr("types").flatten().eq("kind", "OBJECT").list() self.assertEqual( {'OBJECT'}, ListQuery(objects).attr("kind").set() ) self.assertEqual( {'Query'}, ListQuery(objects).attr("name").set() 
) self.assertEqual( {"foo"}, ListQuery(objects).attr("fields").flatten().attr("name").set() ) self.assertTrue( ListQuery(objects).attr("interfaces").flatten().is_empty() ) self.assertEqual( {None}, ListQuery(objects).select( "possibleTypes", "enumValues", "inputFields", "ofType" ).values().flatten().set() ) def test_query_type(self) -> None: class Query(Resolver): foo = "foo" result = self.query( s.Schema( [ ], s.Object("Query", { "foo": s.String, }) ), """ { __schema { queryType { kind name description fields { name } interfaces { name } possibleTypes { name } enumValues { name } inputFields { name } ofType { name } }}} """, Query() ) objects = ListQuery([result]).attr("__schema").attr("queryType").list() self.assertEqual( {'OBJECT'}, ListQuery(objects).attr("kind").set() )
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from subprocess import check_output # call command line # from subprocess import Popen, PIPE import os # make directories, change current dir, etc import platform # macOS or windows import shutil # move and delete files/folders from glob import glob # get contents of folder from jinja2 import Environment, FileSystemLoader # html templating from collections import OrderedDict # put toc in alphebetical order # from pprint import pprint from colorama import Fore,Back import pathlib from pprint import pprint # import markdown import pygments from nbconvert import HTMLExporter import nbformat from slurm.files import rm, mkdir, find from string import printable # filter hiden unicode devnull = open(os.devnull, 'w') root = path = str(pathlib.Path().absolute()) SKIP_EXT = [ ".bag", ".h5", ".pickle", ".gz", ".heic", ".old", ".old2", ".caffemodel", ".pnm", ".ps", ".html", ".try", ".jinja2" ] SKIP_FOLDERS = ['old', 'do_not_backup', 'deleteme', 'large_dataset', 'draft', "__pycache__", ".ipynb_checkpoints", "archive", "blog"] class Jupyter: def __init__(self, template): self.exporter = HTMLExporter() self.exporter.template_name = 'base' self.exporter.theme = "light" self.anchor_link_text = "" self.template = template def get_template_names(self): return self.exporter.get_template_names() def defaults(self): pprint(html_exporter.trait_values()) def extractOneTag(self, text, tag): """ Given a tag, this will return what is inside of it. """ return text[text.find("<"+tag+">") + len("<"+tag+">"):text.find("</"+tag+">")] def to_html(self, dest, file, to_main): """ handle a jupyter notebook """ (html, resources) = self.exporter.from_filename(file) # pprint(resources) html = self.extractOneTag(html, "body") html = self.template.render(info=html, path=to_main) fname, ext = os.path.splitext(file) htmlfile = f'{fname}.html' with open(f"{dest}/{fname}.html", 'w') as fd: fd.write(html) print(f"{Fore.GREEN}>> Jupyter: {htmlfile}{Fore.RESET}") class Markdown: def __init__(self, template): self.template = template def filter_unicode(self, md): """ Sometimes I have (historically) been bitten by invisible (unprintable) unicode sneaking into my work causing issues. This is far less common today, but just in case, this filters it out. """ return ''.join(char for char in md if char in printable) def extractOneTag(self, text, tag): """ Given a tag, this will return what is inside of it. """ return text[text.find("<"+tag+">") + len("<"+tag+">"):text.find("</"+tag+">")] def to_html(self, dest, file, to_main): """ Generate the html and save to disk """ # yaml: sets up date/title ... not sure it is worth it # footnotes: something[^1] ... [^1]: http://somewhere.com # emoji: why not? EXTENTIONS = "markdown\ +yaml_metadata_block\ +footnotes\ +emoji".replace(" ","") TEMPLATE = f"{root}/source/template.markdown.pandoc" # generate the body of the html html = run(f"pandoc -f {EXTENTIONS} --template {TEMPLATE} -t html5 {file}") html = self.template.render(info=html, path=to_main) fname, ext = os.path.splitext(file) with open(f"{dest}/{fname}.html", 'w') as fd: fd.write(html) print(f"{Fore.MAGENTA}>> Markdown: {fname}.html{Fore.RESET}") def run(cmd): # given a command string, it runs it cmd = cmd.split() return check_output(cmd).decode("utf8") def rmdir(path): if not isinstance(path, list): path = [path] for p in path: try: shutil.rmtree(p) # print(p) except FileNotFoundError: # folder was already deleted or doesn't exist ... 
it's ok pass # exit(0) def pandoc(file, dest, template=None, format='html', to_main='.'): """ dest - where html blog goes template - html template format - doing html to_main - get back to source directory. need to keep track of folder stucture for navigation bar across the top of the page """ # handle files if os.path.isfile(file): try: f, ext = os.path.splitext(file) except Exception: print(f'{Fore.RED}*** this file has bad name: {file} ***{Fore.RESET}') exit(1) ext = ext.lower() if ext in ['.md']: # convert markdown to html mark.to_html(dest, file, to_main) elif ext == ".rst": run(f"pandoc --from rst --to markdown -o {file}.md.try {file}") run(f"mv {file} {file}.old") elif ext == '.ipynb': # generate jupyter to html jup.to_html(dest, file, to_main) elif ext in SKIP_EXT: # print(f"{Fore.RED}*** {file}: won't copy to website ***{Fore.RESET}") pass else: path = dest + '/' + file # print(f'{Fore.CYAN}>> Copying file {file}{Fore.RESET}') shutil.copy(file, path) # let's handle directories elif os.path.isdir(file): if file.lower() in SKIP_FOLDERS: print(f'{Fore.YELLOW}>> Skipping folder {file}{Fore.RESET}') return # this must be a directory, let's change into it if to_main == "./..": print(f'==[{file:15}] ===============================') # make the destination path have the same folder path = dest + '/' + file mkdir(path) # change into it, get the files and recursively call pandoc os.chdir(file) files = glob('*') for f in files: pandoc(f, '../' + dest + '/' + file, template, format, to_main=to_main + '/..') os.chdir('../') else: print('***********************************') print(f'*** Unknown File Type: {file}') print('***********************************') # raise Exception() def build_pages(template): # delete the old website so we don't miss anything when building print('Cleaning out old html ------------------') rmdir('html') mkdir('html') # change into source and recursively build website os.chdir("source") # grab files and folders files = glob("*") files.sort() # don't try to build html from the template, we use it another way! 
files.remove('template.jinja2') # for each file/directory in sourece build it into pdf or copy it for f in files: pandoc(f, '../html', template, 'html') # done os.chdir('..') def getSubDir(path): # print(f"{Fore.CYAN}>> {path}{Fore.RESET}") files = {} # os.chdir(path) fs = find(path,"*.html") for f in fs: bname = os.path.basename(f) name, ext = os.path.splitext(bname) name = name.replace('-', ' ').replace('_', ' ').title() # name = name.replace('-', ' ').replace('_', ' ') # print(name,f) files[name] = str(f).split("html/")[1] # return files return OrderedDict(sorted(files.items())) def getDir(path): """ Get and return a list of files and directories in this path """ # print(f"{Fore.GREEN}>> {path}{Fore.RESET}") objs = glob(path) objs.sort() dirs = [] for o in objs: if os.path.isdir(o): # don't save these folders if o.find('pics') >= 0 or o.find('static') >= 0: pass else: dirs.append(o) # elif os.path.isfile(o): # files.append(o) # else: # print(f"{Fore.RED}*** Unknown: {o} ***{Fore.RESET}") return dirs def build_toc2(path, template): toc = {} dirs = getDir(path + "/*") # print(dirs) for d in dirs: dd = os.path.basename(d).replace('-', ' ').replace('_', ' ').title() # dd = os.path.basename(d).replace('-', ' ').replace('_', ' ') toc[dd] = getSubDir(d) toc = OrderedDict(sorted(toc.items())) html = template.render(TOC=toc, path='.') with open('html/topics.html', 'w') as fd: fd.write(html) print(f"{Fore.CYAN}>> Made topics.html{Fore.RESET}") # pprint(toc) jup = None mark = None if __name__ == "__main__": # clean up the input rm(find("./",".DS_Store")) rm(find("./","deleteme")) rmdir(find("./",".ipynb_checkpoints")) rmdir(find("./","__pycache__")) fs = FileSystemLoader('./source') template = Environment(loader=fs, trim_blocks=True).get_template('template.jinja2') jup = Jupyter(template) mark = Markdown(template) build_pages(template) # build_toc2("html/blog",template)
"""This module contains the general information for BiosPlatformDefaults ManagedObject.""" from ...imcmo import ManagedObject from ...imccoremeta import ImcVersion, MoPropertyMeta, MoMeta from ...imcmeta import VersionMeta class BiosPlatformDefaultsConsts(): pass class BiosPlatformDefaults(ManagedObject): """This is BiosPlatformDefaults class.""" consts = BiosPlatformDefaultsConsts() naming_props = set([]) mo_meta = MoMeta("BiosPlatformDefaults", "biosPlatformDefaults", "bios-defaults", VersionMeta.Version152, "OutputOnly", 0xf, [], ["admin", "read-only", "user"], [u'biosUnit'], [u'biosVfASPMSupport', u'biosVfAdjacentCacheLinePrefetch', u'biosVfAltitude', u'biosVfAssertNMIOnPERR', u'biosVfAssertNMIOnSERR', u'biosVfBootOptionRetry', u'biosVfCDNEnable', u'biosVfCDNSupport', u'biosVfCPUEnergyPerformance', u'biosVfCPUFrequencyFloor', u'biosVfCPUPerformance', u'biosVfCPUPowerManagement', u'biosVfCkeLowPolicy', u'biosVfConsoleRedirection', u'biosVfCoreMultiProcessing', u'biosVfDCUPrefetch', u'biosVfDRAMClockThrottling', u'biosVfDemandScrub', u'biosVfDirectCacheAccess', u'biosVfDramRefreshRate', u'biosVfEnhancedIntelSpeedStepTech', u'biosVfExecuteDisableBit', u'biosVfExtendedAPIC', u'biosVfFRB2Enable', u'biosVfHardwarePrefetch', u'biosVfIOHResource', u'biosVfIntelHyperThreadingTech', u'biosVfIntelTurboBoostTech', u'biosVfIntelVTForDirectedIO', u'biosVfIntelVirtualizationTechnology', u'biosVfLOMPortOptionROM', u'biosVfLegacyUSBSupport', u'biosVfLvDIMMSupport', u'biosVfMMCFGBase', u'biosVfMemoryInterleave', u'biosVfMemoryMappedIOAbove4GB', u'biosVfMirroringMode', u'biosVfNUMAOptimized', u'biosVfOSBootWatchdogTimer', u'biosVfOSBootWatchdogTimerPolicy', u'biosVfOSBootWatchdogTimerTimeout', u'biosVfOnboardNIC', u'biosVfOnboardStorage', u'biosVfOnboardStorageSWStack', u'biosVfOutOfBandMgmtPort', u'biosVfPCIOptionROMs', u'biosVfPCISlotOptionROMEnable', u'biosVfPOSTErrorPause', u'biosVfPStateCoordType', u'biosVfPackageCStateLimit', u'biosVfPatrolScrub', u'biosVfPatrolScrubDuration', u'biosVfPchUsb30Mode', u'biosVfPciRomClp', u'biosVfProcessorC1E', u'biosVfProcessorC3Report', u'biosVfProcessorC6Report', u'biosVfProcessorCState', u'biosVfPwrPerfTuning', u'biosVfQPIConfig', u'biosVfQpiSnoopMode', u'biosVfSataModeSelect', u'biosVfSelectMemoryRASConfiguration', u'biosVfSerialPortAEnable', u'biosVfSparingMode', u'biosVfSrIov', u'biosVfTPMSupport', u'biosVfUCSMBootOrderRuleControl', u'biosVfUSBBootConfig', u'biosVfUSBEmulation', u'biosVfUSBPortsConfig', u'biosVfVgaPriority', u'biosVfWorkLoadConfig'], ["Get"]) prop_meta = { "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version152, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version152, MoPropertyMeta.READ_ONLY, 0x2, 0, 255, None, [], []), "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version152, MoPropertyMeta.READ_ONLY, 0x4, 0, 255, None, [], []), "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version152, MoPropertyMeta.READ_ONLY, 0x8, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []), } prop_map = { "childAction": "child_action", "dn": "dn", "rn": "rn", "status": "status", } def __init__(self, parent_mo_or_dn, **kwargs): self._dirty_mask = 0 self.child_action = None self.status = None ManagedObject.__init__(self, "BiosPlatformDefaults", parent_mo_or_dn, **kwargs)
from tools.load import loadFile from speech import assistantResponse from voiceRecognize import listen from intents.timedate import timeDate_intent as timeDate from intents.weather import wheather_intent as weather from intents.assistant import assistant_intent as assistant import random, os, sys from multiprocessing import Process loadModel = loadFile('models/dictionary.json') Greeting_Response = loadModel['greetings']['responses'] cancle = ["ok!", "no problem", ""] cancelWords = ['cancel', 'thanks'] intents = [weather.callActions(), timeDate.callActions(), assistant.callActions()] STT = listen() #obj to convert speech to text def intentCall(): assistantResponse(random.choice(Greeting_Response)) count = 0 while count<3: text = STT.listening() if len(text) > 0: for cancelWord in cancelWords: if cancelWord in text: return random.choice(cancle) intent = getIntent(text) assistantResponse(intent) count = 1 elif count == 0: assistantResponse(random.choice(Greeting_Response)) count += 1 return "" def getIntent(text): for intent in intents: if text in intent[0]: return intent[1] return "could not understand"
""" Copyright © 2022 Armağan Salman Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ """ +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ > WHY To change with flexibility the file names under one directory/folder. """ import os from typing import Tuple # Can't use tuple[list[str], str] for Python ver. below 3.8 ; must use typing.Tuple from typing import List from typing import Callable FName_t = str DirName_t = str NameDir_t = Tuple[FName_t, DirName_t] MultiNameDir_t = Tuple[List[FName_t], DirName_t] ChangedName_t = FName_t NameGenFun_t = Callable[[NameDir_t], ChangedName_t] def names_under_dir(dir_path: str): assert(os.path.isdir(dir_path)) names = os.listdir(dir_path) return (names, dir_path) ### def combine_folder_filename(folder: str, filename: str) -> str: combined = os.path.join( folder, filename ) if os.path.isfile(combined) or os.path.isdir(combined): return os.path.abspath(combined) # else: err = "{} not a file or directory.".format(combined) raise Exception(err) # ### def bulk_generate_names(names_dir: MultiNameDir_t, name_generator: NameGenFun_t) \ -> MultiNameDir_t: # many_names: List[str] = names_dir[0] folder: str = names_dir[1] generated_names = [] for name in many_names: name_dir = (name, folder) new_name: ChangedName_t = name_generator(name_dir) generated_names.append(new_name) # return (generated_names, folder) ### def crop_from_start(str_from_start: str) -> NameGenFun_t: def cropper(arg: NameDir_t) -> str: if arg[0].startswith(str_from_start): crop_len = len(str_from_start) return arg[0][crop_len:] # else: return arg[0] # ### return cropper ### def add_str_to_first_word(to_append: str) -> NameGenFun_t: def concatter(arg: NameDir_t): name = arg[0] splitter = ' ' parts = name.split(splitter) first_word = parts[0] added = first_word + str(to_append) parts[0] = added return splitter.join(parts) ### return concatter # dirp = "." 
dirp = "C:\\Users\\armagan\\Desktop\\Temp items\\music change name" mult_name_dir = names_under_dir(dirp) old_names = mult_name_dir[0] generated_namesdir = bulk_generate_names(mult_name_dir, crop_from_start("9convert.com - ")) """ generated = generated_namesdir[0] folder = generated_namesdir[1] new_names = generated """ """ for i in range(len(generated)): orig, genr = old_names[i], new_names[i] print("+++++++++++++++++++++++++++++") print("Folder: {}".format(folder)) print("old -> new preview = <<{}>> # <<{}>>".format(orig, genr)) # """ generated_namesdir2 = bulk_generate_names(generated_namesdir, add_str_to_first_word(",4")) generated = generated_namesdir2[0] folder = generated_namesdir2[1] new_names = generated assert(len(old_names) == len(new_names)) for i in range(len(generated)): orig, genr = old_names[i], new_names[i] print("+++++++++++++++++++++++++++++") print("Folder: {}".format(folder)) print("old -> new preview = <<{}>> ### <<{}>>".format(orig, genr)) #
from pytorch3d.renderer import RasterizationSettings from pytorch3d.renderer import MeshRasterizer class Rasterizer: def init_rasterizer(self,cameras,image_size=512, blur_radius=0.0, faces_per_pixel=1): raster_settings = RasterizationSettings(image_size=image_size, blur_radius=blur_radius, faces_per_pixel=faces_per_pixel) self.rasterizer = MeshRasterizer(cameras=cameras,raster_settings=raster_settings).cpu()
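
# Hedged usage sketch for the Rasterizer wrapper above: rasterize a single
# triangle with a default field-of-view camera. The camera/mesh classes come
# from the public pytorch3d API; the geometry values are made up for
# illustration.
import torch
from pytorch3d.renderer import FoVPerspectiveCameras
from pytorch3d.structures import Meshes

if __name__ == "__main__":
    verts = torch.tensor([[0.0, 0.0, 2.0], [1.0, 0.0, 2.0], [0.0, 1.0, 2.0]])
    faces = torch.tensor([[0, 1, 2]])
    mesh = Meshes(verts=[verts], faces=[faces])

    wrapper = Rasterizer()
    wrapper.init_rasterizer(FoVPerspectiveCameras(), image_size=256)
    fragments = wrapper.rasterizer(mesh)  # Fragments: pix_to_face, zbuf, bary_coords, dists
    print(fragments.pix_to_face.shape)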
import os import pandas as pd from nltk import word_tokenize import gensim data = pd.read_json(os.path.join("scraper_siglas-uc", "outputs", "programas_clean.json"), orient="table") programas = data["programa"].apply(lambda x: word_tokenize(x)).to_list() diccionario = gensim.corpora.Dictionary(programas) corpus = [diccionario.doc2bow(programa) for programa in programas] model = gensim.models.LsiModel(corpus, 203, diccionario) index = gensim.similarities.MatrixSimilarity(model[corpus]) index.save(os.path.join("modelo", "outputs", "index.index")) diccionario.save(os.path.join("modelo", "outputs", "diccionario.dict")) model.save(os.path.join("modelo", "outputs", "model.model"))
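
# Hedged sketch of querying the index that was just built: turn a free-text
# query into a bag-of-words, project it into the LSI space and rank the course
# programs by similarity (the query string below is just an example).
consulta = word_tokenize("introduccion a la programacion")
similitudes = index[model[diccionario.doc2bow(consulta)]]
mejores = sorted(enumerate(similitudes), key=lambda par: par[1], reverse=True)[:5]
print(mejores)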
import random import string from cancel_token import CancelToken from eth_utils import ( to_bytes, keccak, ) from eth_keys import keys from p2p import discovery from p2p import kademlia def random_address(): return kademlia.Address( '10.0.0.{}'.format(random.randint(0, 255)), random.randint(0, 9999)) def random_node(): seed = to_bytes(text="".join(random.sample(string.ascii_lowercase, 10))) priv_key = keys.PrivateKey(keccak(seed)) return kademlia.Node(priv_key.public_key, random_address()) def get_discovery_protocol(seed=b"seed", address=None): privkey = keys.PrivateKey(keccak(seed)) if address is None: address = random_address() return discovery.DiscoveryProtocol(privkey, address, [], CancelToken("discovery-test"))
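
# Minimal usage sketch for the test helpers above; everything is generated from
# random or seed data, so no network access is needed:
if __name__ == "__main__":
    print(random_address())
    print(random_node())
    print(get_discovery_protocol(seed=b"example-seed"))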
########################### # # #446 Retractions B - Project Euler # https://projecteuler.net/problem=446 # # Code by Kevin Marciniak # ###########################
# (C) Copyright 2020 ECMWF. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. # In applying this licence, ECMWF does not waive the privileges and immunities # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. # import datetime import logging import eccodes from . import Reader from climetlab.utils.bbox import BoundingBox LOG = logging.getLogger(__name__) # return eccodes.codes_new_from_file(self.file, eccodes.CODES_PRODUCT_GRIB) class CodesHandle: def __init__(self, handle, path, offset): self.handle = handle self.path = path self.offset = offset def __del__(self): eccodes.codes_release(self.handle) def get(self, name): try: if name == "values": return eccodes.codes_get_values(self.handle) return eccodes.codes_get(self.handle, name) except eccodes.KeyValueNotFoundError: return None class CodesReader: def __init__(self, path): self.path = path self.file = open(self.path, "rb") def __del__(self): try: self.file.close() except Exception: pass def at_offset(self, offset): self.file.seek(offset, 0) return self.__next__() def __iter__(self): return self def __next__(self): offset = self.file.tell() handle = eccodes.codes_new_from_file(self.file, eccodes.CODES_PRODUCT_GRIB) if not handle: raise StopIteration() return CodesHandle(handle, self.path, offset) class GribField: def __init__(self, handle, path): self.handle = handle self.path = path def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): # Place to suppress exceptions (don't reraise the passed-in exception, it is the caller's responsibility) pass @property def values(self): return self.handle.get("values") @property def offset(self): return int(self.handle.get("offset")) @property def shape(self): return self.handle.get("Nj"), self.handle.get("Ni") def plot_map(self, driver): driver.bounding_box( north=self.handle.get("latitudeOfFirstGridPointInDegrees"), south=self.handle.get("latitudeOfLastGridPointInDegrees"), west=self.handle.get("longitudeOfFirstGridPointInDegrees"), east=self.handle.get("longitudeOfLastGridPointInDegrees"), ) driver.plot_grib(self.path, self.handle.get("offset")) def to_numpy(self): return self.values.reshape(self.shape) def __repr__(self): return "GribField(%s,%s,%s,%s,%s,%s)" % ( self.handle.get("shortName"), self.handle.get("levelist"), self.handle.get("date"), self.handle.get("time"), self.handle.get("step"), self.handle.get("number"), ) def _grid_definition(self): return dict( north=self.handle.get("latitudeOfFirstGridPointInDegrees"), south=self.handle.get("latitudeOfLastGridPointInDegrees"), west=self.handle.get("longitudeOfFirstGridPointInDegrees"), east=self.handle.get("longitudeOfLastGridPointInDegrees"), south_north_increment=self.handle.get("jDirectionIncrementInDegrees"), west_east_increment=self.handle.get("iDirectionIncrementInDegrees"), ) def field_metadata(self): m = self._grid_definition() for n in ("shortName", "units", "paramId"): p = self.handle.get(n) if p is not None: m[n] = str(p) m["shape"] = self.shape return m def helper(self): return self def datetime(self): date = self.handle.get("date") time = self.handle.get("time") return datetime.datetime( date // 10000, date % 10000 // 100, date % 100, time // 100, time % 100 ) def valid_datetime(self): step = self.handle.get("endStep") return self.datetime() + datetime.timedelta(hours=step) def to_datetime_list(self): return [self.valid_datetime()] def 
to_bounding_box(self): return BoundingBox( north=self.handle.get("latitudeOfFirstGridPointInDegrees"), south=self.handle.get("latitudeOfLastGridPointInDegrees"), west=self.handle.get("longitudeOfFirstGridPointInDegrees"), east=self.handle.get("longitudeOfLastGridPointInDegrees"), ) class GRIBIterator: def __init__(self, path): self.path = path self.reader = CodesReader(path) def __repr__(self): return "GRIBIterator(%s)" % (self.path,) def __next__(self): return GribField(next(self.reader), self.path) class GRIBReader(Reader): def __init__(self, source, path): super().__init__(source, path) self._fields = None self._reader = None def __repr__(self): return "GRIBReader(%s)" % (self.path,) def __iter__(self): return GRIBIterator(self.path) def _items(self): if self._fields is None: self._fields = [] for f in self: self._fields.append(f.offset) return self._fields def __getitem__(self, n): if self._reader is None: self._reader = CodesReader(self.path) return GribField(self._reader.at_offset(self._items()[n]), self.path) def __len__(self): return len(self._items()) def to_xarray(self): import xarray as xr return xr.open_dataset(self.path, engine="cfgrib")
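# --- Usage sketch (not part of the library) ----------------------------------
# A minimal, hedged example of reading the first message of a GRIB file with
# the classes above. "sample.grib" is a hypothetical path; the sketch assumes
# a regular lat/lon grid so that Ni/Nj and the reshape in to_numpy() apply.
if __name__ == "__main__":
    reader = CodesReader("sample.grib")
    field = GribField(next(reader), reader.path)
    print(field)                                   # shortName/level/date/time/step/number
    print(field.datetime(), field.valid_datetime())
    print(field.to_numpy().shape)                  # (Nj, Ni)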
from cvxopt import matrix, spmatrix, mul, div # NOQA from .base import ModelBase from ..consts import Fx0, Fy0, Gx0, Gy0 # NOQA class Node(ModelBase): """ DC node class""" def __init__(self, system, name): super().__init__(system, name) self._group = 'Topology' self._name = 'Node' self.param_remove('Sn') self.param_remove('Vn') self._params.extend([ 'Vdcn', 'Idcn', 'voltage', ]) self._descr.update({ 'Vdcn': 'DC voltage rating', 'Idcn': 'DC current rating', 'voltage': 'Initial nodal voltage guess', 'area': 'Area code', 'region': 'Region code', 'xcoord': 'x coordinate', 'ycoord': 'y coordinate' }) self._data.update({ 'Vdcn': 100.0, 'Idcn': 10.0, 'area': 0, 'region': 0, 'voltage': 1.0, 'xcoord': None, 'ycoord': None, }) self._units.update({ 'Vdcn': 'kV', 'Idcn': 'kA', 'area': 'na', 'region': 'na', 'voltage': 'pu', 'xcoord': 'deg', 'ycoord': 'deg', }) self.calls.update({ 'init0': True, 'pflow': True, 'jac0': True, }) self._mandatory = ['Vdcn'] self._zeros = ['Vdcn', 'Idcn'] # self.v = list() self._algebs.extend(['v']) self._fnamey.extend(['V_{dc}']) self._init() def elem_add(self, idx=None, name=None, **kwargs): super().elem_add(idx, name, **kwargs) def _varname(self): if not self.n: return self.system.varname.append( listname='unamey', xy_idx=self.v, var_name='Vdc', element_name=self.name) self.system.varname.append( listname='fnamey', xy_idx=self.v, var_name='V_{dc}', element_name=self.name) def setup(self): self._param_to_matrix() def init0(self, dae): dae.y[self.v] = matrix(self.voltage, (self.n, 1), 'd') def jac0(self, dae): if self.n == 0: return dae.add_jac(Gy0, -1e-6, self.v, self.v) class DCBase(ModelBase): """Two-terminal DC device base""" def __init__(self, system, name): super().__init__(system, name) self._group = 'DCBasic' self._params.remove('Sn') self._params.remove('Vn') self._data.update({ 'node1': None, 'node2': None, 'Vdcn': 100.0, 'Idcn': 10.0, }) self._params.extend(['Vdcn', 'Idcn']) self._descr.update({ 'Vdcn': 'DC voltage rating', 'Idcn': 'DC current rating', 'node1': 'DC node 1 idx', 'node2': 'DC node 2 idx', }) self._units.update({ 'Vdcn': 'kV', 'Idcn': 'A', }) self._dc = { 'node1': 'v1', 'node2': 'v2', } self._mandatory.extend(['node1', 'node2', 'Vdcn']) @property def v12(self): return self.system.dae.y[self.v1] - self.system.dae.y[self.v2] class R(DCBase): """DC Resistence line class""" def __init__(self, system, name): super().__init__(system, name) self._name = 'R' self._params.extend(['R']) self._data.update({ 'R': 0.01, }) self._r.extend(['R']) self.calls.update({ 'pflow': True, 'gcall': True, 'jac0': True, }) self._algebs.extend(['Idc']) self._fnamey = ['I_{dc}'] self._init() def gcall(self, dae): dae.g[self.Idc] = div(self.v12, self.R) + dae.y[self.Idc] dae.g -= spmatrix(dae.y[self.Idc], self.v1, [0] * self.n, (dae.m, 1), 'd') dae.g += spmatrix(dae.y[self.Idc], self.v2, [0] * self.n, (dae.m, 1), 'd') def jac0(self, dae): dae.add_jac(Gy0, -self.u, self.v1, self.Idc) dae.add_jac(Gy0, self.u, self.v2, self.Idc) dae.add_jac(Gy0, div(self.u, self.R), self.Idc, self.v1) dae.add_jac(Gy0, -div(self.u, self.R), self.Idc, self.v2) dae.add_jac(Gy0, self.u - 1e-6, self.Idc, self.Idc) class L(DCBase): """Pure inductive line""" def __init__(self, system, name): super(L, self).__init__(system, name) self._name = 'L' self._data.update({'L': 0.001}) self._params.extend(['L']) self._r.extend(['L']) self._algebs.extend(['Idc']) self._fnamey.extend(['I_{dc}']) self._states.extend(['IL']) self._fnamex.extend(['I_L']) self._service.extend(['iL']) self.calls.update({ 'pflow': True, 
'init0': True, 'gcall': True, 'fcall': True, 'jac0': True, }) self._init() def servcall(self, dae): self.iL = div(self.u, self.L) def init0(self, dae): self.servcall(dae) def gcall(self, dae): dae.g[self.Idc] = dae.x[self.IL] + dae.y[self.Idc] dae.g -= spmatrix(dae.y[self.Idc], self.v1, [0] * self.n, (dae.m, 1), 'd') dae.g += spmatrix(dae.y[self.Idc], self.v2, [0] * self.n, (dae.m, 1), 'd') def fcall(self, dae): dae.f[self.IL] = mul(self.v12, self.iL) def jac0(self, dae): dae.add_jac(Gx0, self.u, self.Idc, self.IL) dae.add_jac(Gy0, self.u, self.Idc, self.Idc) dae.add_jac(Fy0, self.iL, self.IL, self.v1) dae.add_jac(Fy0, -self.iL, self.IL, self.v2) dae.add_jac(Gy0, -self.u, self.v1, self.Idc) dae.add_jac(Gy0, self.u, self.v2, self.Idc) class C(DCBase): """Pure capacitive line""" def __init__(self, system, name): super(C, self).__init__(system, name) self._name = 'C' self._data.update({'C': 0.001}) self._params.extend(['C']) self._g.extend(['C']) self._algebs.extend(['Idc']) self._fnamey.extend(['I_{dc}']) self._states.extend(['vC']) self._fnamex.extend(['vC']) self._service.extend(['iC']) self.calls.update({ 'pflow': True, 'init0': True, 'gcall': True, 'fcall': True, 'jac0': True, }) self._init() def servcall(self, dae): self.iC = div(self.u, self.C) def init0(self, dae): self.servcall(dae) def gcall(self, dae): dae.g[self.Idc] = dae.x[self.vC] - self.v12 dae.g -= spmatrix(dae.y[self.Idc], self.v1, [0] * self.n, (dae.m, 1), 'd') dae.g += spmatrix(dae.y[self.Idc], self.v2, [0] * self.n, (dae.m, 1), 'd') def fcall(self, dae): dae.f[self.vC] = -mul(dae.y[self.Idc], self.iC) def jac0(self, dae): dae.add_jac(Gx0, self.u, self.Idc, self.vC) dae.add_jac(Gy0, -self.u, self.Idc, self.v1) dae.add_jac(Gy0, self.u, self.Idc, self.v2) dae.add_jac(Gy0, 1e-6, self.Idc, self.Idc) dae.add_jac(Gy0, -self.u, self.v1, self.Idc) dae.add_jac(Gy0, self.u, self.v2, self.Idc) dae.add_jac(Fy0, -self.iC, self.vC, self.Idc) class RLs(DCBase): """DC Resistive and Inductive line""" def __init__(self, system, name): super(RLs, self).__init__(system, name) self._name = 'RLs' self._params.extend(['R', 'L']) self._data.update({ 'R': 0.01, 'L': 0.001, }) self._params.extend(['R', 'L']) self._r.extend(['R', 'L']) self._algebs.extend(['Idc']) self._fnamey.extend(['I_{dc}']) self._states.extend(['IL']) self._fnamex.extend(['I_L']) self.calls.update({ 'pflow': True, 'init0': True, 'gcall': True, 'fcall': True, 'jac0': True, }) self._service.extend(['iR', 'iL']) self._init() def servcall(self, dae): self.iR = div(self.u, self.R) self.iL = div(self.u, self.L) def init0(self, dae): self.servcall(dae) dae.x[self.IL] = mul(self.v12, self.iR) dae.y[self.Idc] = -dae.x[self.IL] def gcall(self, dae): dae.g[self.Idc] = mul(self.u, dae.x[self.IL] + dae.y[self.Idc]) dae.g -= spmatrix( mul(self.u, dae.y[self.Idc]), self.v1, [0] * self.n, (dae.m, 1), 'd') dae.g += spmatrix( mul(self.u, dae.y[self.Idc]), self.v2, [0] * self.n, (dae.m, 1), 'd') def fcall(self, dae): dae.f[self.IL] = mul(self.v12 - mul(self.R, dae.x[self.IL], self.u), self.iL) def jac0(self, dae): dae.add_jac(Gx0, self.u, self.Idc, self.IL) dae.add_jac(Gy0, self.u + 1e-6, self.Idc, self.Idc) dae.add_jac(Gy0, -self.u, self.v1, self.Idc) dae.add_jac(Gy0, self.u, self.v2, self.Idc) dae.add_jac(Fx0, -mul(self.R, self.iL, self.u) + 1e-6, self.IL, self.IL) dae.add_jac(Fy0, mul(self.u, self.iL), self.IL, self.v1) dae.add_jac(Fy0, -mul(self.u, self.iL), self.IL, self.v2) class RCp(DCBase): """RC parallel line""" def __init__(self, system, name): super(RCp, self).__init__(system, name) 
self._name = 'RCp' self._params.extend(['R', 'C']) self._data.update({ 'R': 0.01, 'C': 0.001, }) self._params.extend(['R', 'C']) self._r.extend(['R']) self._g.extend(['C']) self._algebs.extend(['Idc']) self._fnamey.extend(['I_{dc}']) self._states.extend(['vC']) self._fnamex.extend(['v_C']) self.calls.update({ 'pflow': True, 'init0': True, 'gcall': True, 'fcall': True, 'jac0': True, }) self._service.extend(['iR', 'iC']) self._init() def servcall(self, dae): self.iR = div(self.u, self.R) self.iC = div(self.u, self.C) def init0(self, dae): self.servcall(dae) dae.x[self.vC] = self.v12 dae.y[self.Idc] = -mul(self.v12, self.iR) def gcall(self, dae): dae.g[self.Idc] = dae.x[self.vC] - self.v12 dae.g -= spmatrix(dae.y[self.Idc], self.v1, [0] * self.n, (dae.m, 1), 'd') dae.g += spmatrix(dae.y[self.Idc], self.v2, [0] * self.n, (dae.m, 1), 'd') def fcall(self, dae): dae.f[self.vC] = -mul(dae.y[self.Idc] + mul(dae.x[self.vC], self.iR), self.iC) def jac0(self, dae): dae.add_jac(Gx0, self.u, self.Idc, self.vC) dae.add_jac(Gy0, -self.u, self.Idc, self.v1) dae.add_jac(Gy0, self.u, self.Idc, self.v2) dae.add_jac(Gy0, 1e-6, self.Idc, self.Idc) dae.add_jac(Gy0, -self.u, self.v1, self.Idc) dae.add_jac(Gy0, self.u, self.v2, self.Idc) dae.add_jac(Fy0, -self.iC, self.vC, self.Idc) dae.add_jac(Fx0, -mul(self.iR, self.iC), self.vC, self.vC) class RLCp(DCBase): """RLC parallel line""" def __init__(self, system, name): super(RLCp, self).__init__(system, name) self._name = 'RLCp' self._params.extend(['R', 'L', 'C']) self._data.update({ 'R': 0.01, 'L': 0.001, 'C': 0.001, }) self._params.extend(['R', 'L', 'C']) self._r.extend(['R', 'L']) self._g.extend(['C']) self._algebs.extend(['Idc']) self._fnamey.extend(['I_{dc}']) self._states.extend(['IL', 'vC']) self._fnamex.extend(['I_L', 'v_C']) self.calls.update({ 'pflow': True, 'init0': True, 'gcall': True, 'fcall': True, 'jac0': True, }) self._service.extend(['iR', 'iL', 'iC']) self._init() def servcall(self, dae): self.iR = div(self.u, self.R) self.iL = div(self.u, self.L) self.iC = div(self.u, self.C) def init0(self, dae): self.servcall(dae) dae.x[self.vC] = self.v12 dae.y[self.Idc] = -mul(self.v12, self.iR) def gcall(self, dae): dae.g[self.Idc] = dae.x[self.vC] - self.v12 dae.g -= spmatrix(dae.y[self.Idc], self.v1, [0] * self.n, (dae.m, 1), 'd') dae.g += spmatrix(dae.y[self.Idc], self.v2, [0] * self.n, (dae.m, 1), 'd') def fcall(self, dae): dae.f[self.IL] = mul(dae.x[self.vC], self.iL) dae.f[self.vC] = -mul( dae.y[self.Idc] + mul(dae.x[self.vC], self.iR) + dae.x[self.IL], self.iC) def jac0(self, dae): dae.add_jac(Gx0, self.u, self.Idc, self.vC) dae.add_jac(Gy0, -self.u, self.Idc, self.v1) dae.add_jac(Gy0, self.u, self.Idc, self.v2) dae.add_jac(Gy0, 1e-6, self.Idc, self.Idc) dae.add_jac(Gy0, -self.u, self.v1, self.Idc) dae.add_jac(Gy0, self.u, self.v2, self.Idc) dae.add_jac(Fx0, self.iL, self.IL, self.vC) dae.add_jac(Fy0, -self.iC, self.vC, self.Idc) dae.add_jac(Fx0, -mul(self.iR, self.iC), self.vC, self.vC) dae.add_jac(Fx0, -self.iC, self.vC, self.IL) class RCs(DCBase): """RC series line""" def __init__(self, system, name): super(RCs, self).__init__(system, name) self._name = 'RCs' self._data.update({'R': 0.01, 'C': 0.001}) self._params.extend(['R', 'C']) self._r.extend(['R']) self._g.extend(['C']) self._algebs.extend(['Idc']) self._fnamey.extend(['I_{dc}']) self._states.extend(['vC']) self._fnamex.extend(['vC']) self._service.extend(['iC', 'iR']) self.calls.update({ 'pflow': True, 'init0': True, 'gcall': True, 'fcall': True, 'jac0': True, }) self._init() def 
servcall(self, dae): self.iC = div(self.u, self.C) self.iR = div(self.u, self.R) def init0(self, dae): self.servcall(dae) def gcall(self, dae): dae.g[self.Idc] = dae.x[self.vC] - self.v12 - mul( dae.y[self.Idc], self.R) dae.g -= spmatrix(dae.y[self.Idc], self.v1, [0] * self.n, (dae.m, 1), 'd') dae.g += spmatrix(dae.y[self.Idc], self.v2, [0] * self.n, (dae.m, 1), 'd') def fcall(self, dae): dae.f[self.vC] = -mul(dae.y[self.Idc], self.iC) def jac0(self, dae): dae.add_jac(Gx0, self.u, self.Idc, self.vC) dae.add_jac(Gy0, -self.u, self.Idc, self.v1) dae.add_jac(Gy0, self.u, self.Idc, self.v2) dae.add_jac(Gy0, 1e-6 - self.R, self.Idc, self.Idc) dae.add_jac(Gy0, -self.u, self.v1, self.Idc) dae.add_jac(Gy0, self.u, self.v2, self.Idc) dae.add_jac(Fy0, -self.iC, self.vC, self.Idc) class RLCs(DCBase): """RLC series""" def __init__(self, system, name): super(RLCs, self).__init__(system, name) self._name = 'RLCs' self._params.extend(['R', 'L', 'C']) self._data.update({'R': 0.01, 'L': 0.001, 'C': 0.001}) self._params.extend(['R', 'L', 'C']) self._r.extend(['R', 'L']) self._g.extend(['C']) self._algebs.extend(['Idc']) self._fnamey.extend(['I_{dc}']) self._states.extend(['IL', 'vC']) self._fnamex.extend(['I_L', 'v_C']) self.calls.update({ 'pflow': True, 'init0': True, 'gcall': True, 'fcall': True, 'jac0': True, }) self._service.extend(['iR', 'iL', 'iC']) self._init() def servcall(self, dae): self.iR = div(self.u, self.R) self.iL = div(self.u, self.L) self.iC = div(self.u, self.C) def init0(self, dae): self.servcall(dae) dae.x[self.vC] = self.v12 def gcall(self, dae): dae.g[self.Idc] = dae.x[self.IL] + dae.y[self.Idc] dae.g -= spmatrix(dae.y[self.Idc], self.v1, [0] * self.n, (dae.m, 1), 'd') dae.g += spmatrix(dae.y[self.Idc], self.v2, [0] * self.n, (dae.m, 1), 'd') def fcall(self, dae): dae.f[self.IL] = mul( self.v12 - mul(self.R, dae.x[self.IL]) - dae.x[self.vC], self.iL) dae.f[self.vC] = mul(dae.x[self.IL], self.iC) def jac0(self, dae): dae.add_jac(Gx0, self.u, self.Idc, self.IL) dae.add_jac(Gy0, self.u, self.Idc, self.Idc) dae.add_jac(Gy0, -self.u, self.v1, self.Idc) dae.add_jac(Gy0, self.u, self.v2, self.Idc) dae.add_jac(Fx0, -mul(self.R, self.iL), self.IL, self.IL) dae.add_jac(Fy0, self.iL, self.IL, self.v1) dae.add_jac(Fy0, -self.iL, self.IL, self.v2) dae.add_jac(Fx0, -self.iL, self.IL, self.vC) dae.add_jac(Fx0, self.iC, self.vC, self.IL) class Ground(DCBase): """DC Ground node""" def __init__(self, system, name): super().__init__(system, name) self.param_remove('node1') self.param_remove('node2') self.param_remove('v') self._data.update({ 'node': None, 'voltage': 0.0, }) self._algebs.extend(['I']) self._unamey = ['I'] self._fnamey = ['I'] self._dc = {'node': 'v'} self._params.extend(['voltage']) self._mandatory.extend(['node']) self.calls.update({ 'init0': True, 'jac0': True, 'gcall': True, 'pflow': True, }) self._init() def init0(self, dae): dae.y[self.v] = self.voltage def gcall(self, dae): dae.g[self.v] -= dae.y[self.I] dae.g[self.I] = self.voltage - dae.y[self.v] def jac0(self, dae): dae.add_jac(Gy0, -self.u, self.v, self.I) dae.add_jac(Gy0, self.u - 1 - 1e-6, self.I, self.I) dae.add_jac(Gy0, -self.u, self.I, self.v) class DCgen(DCBase): """A simple DC generator to impose constantactive power injection""" def __init__(self, system, name): super().__init__(system, name) self._name = 'DCgen' self._params.extend(['P', 'Sn']) self._data.update({'P': 0.0, }) self._descr.update({'P': 'Active power generation'}) self._units.update({'P': 'pu'}) self._powers.extend(['P']) self.calls.update({'pflow': True, 
'gcall': True, 'stagen': True, }) self._init() def gcall(self, dae): dae.g -= spmatrix(div(mul(self.u, self.P), self.v12), self.v1, [0] * self.n, (dae.m, 1), 'd') dae.g -= spmatrix(-div(mul(self.u, self.P), self.v12), self.v2, [0] * self.n, (dae.m, 1), 'd') def disable_gen(self, idx): self.u[self.uid[idx]] = 0 self.system.dae.factorize = True class DCload(DCBase): """A simple DC load to impose constant active power consumption""" def __init__(self, system, name): super().__init__(system, name) self._name = 'DCload' self._params.extend(['P', 'Sn']) self._data.update({'P': 0.0, }) self._powers.extend(['P']) self._descr.update({'P': 'Active power load'}) self._units.update({'P': 'pu'}) self.calls.update({'pflow': True, 'gcall': True, }) self._init() def gcall(self, dae): dae.g += spmatrix(div(mul(self.u, self.P), self.v12), self.v1, [0] * self.n, (dae.m, 1), 'd') dae.g += spmatrix(-div(mul(self.u, self.P), self.v12), self.v2, [0] * self.n, (dae.m, 1), 'd') def disable(self, idx): self.u[self.uid[idx]] = 0 self.system.dae.factorize = True
# flake8: noqa import unittest from remember.sql_store import _create_command_search_select_query, Command, _rerank_matches, \ SqlCommandStore REMEMBER_STAR = 'full_command, count_seen, last_used, command_info' class SqlStoreTests(unittest.TestCase): def test_create_select_query_whenSingleTermAll3_ShouldReturnAll3Query(self) -> None: query = _create_command_search_select_query(['grep'], True, True, True) query = ' '.join(query.split()) expected = f"SELECT {REMEMBER_STAR} FROM remember WHERE (full_command LIKE 'grep%' OR " \ "command_info LIKE 'grep%') ORDER BY count_seen DESC, last_used DESC" self.assertEqual(expected, query) def test_create_select_query_whenSingleTermNoSpecial_ShouldReturnBasicQuery(self) -> None: query = _create_command_search_select_query(['grep'], False, False, False) query = ' '.join(query.split()) expected = f"SELECT {REMEMBER_STAR} FROM remember WHERE (full_command LIKE '%grep%')" self.assertEqual(expected, query) def test_create_select_query_whenSingleTermSorted_ShouldReturnBasicSortQuery(self) -> None: query = _create_command_search_select_query(['grep'], False, True, False) query = ' '.join(query.split()) expected = f"SELECT {REMEMBER_STAR} FROM remember WHERE (full_command LIKE " \ f"'%grep%') ORDER BY count_seen DESC, last_used DESC" self.assertEqual(expected, query) def test_create_select_query_whenSingleTermStartsWith_ShouldReturnStartsWithQuery(self) -> None: query = _create_command_search_select_query(['grep'], True, False, False) query = ' '.join(query.split()) expected = f"SELECT {REMEMBER_STAR} FROM remember WHERE (full_command LIKE 'grep%')" self.assertEqual(expected, query) def test_create_select_query_whenSingleTermStartsWithAndSort_ShouldReturnBothQuery(self) -> None: query = _create_command_search_select_query(['grep'], True, True, False) query = ' '.join(query.split()) expected = f"SELECT {REMEMBER_STAR} FROM remember WHERE (full_command LIKE 'grep%') " \ "ORDER BY count_seen DESC, last_used DESC" self.assertEqual(expected, query) def test_rerank_whenMoreTermsInLater_shouldReorderCommands(self) -> None: command_str = 'one two three' c1 = Command(command_str) command_str = 'one match only' c2 = Command(command_str) command_str = 'one two matches in this' c3 = Command(command_str) command_str = 'two matches in this one also' c4 = Command(command_str) matches = [c3, c2, c4, c1] reranked_result = _rerank_matches(matches, ['one', 'two', 'three']) expected = [c1, c3, c4, c2] self.assertListEqual(expected, reranked_result) def test_Command_whenCommandStringIsDot_shouldParseCorrectlyAndNotCrash(self) -> None: command_str = '.' 
c1 = Command(command_str) c1.get_command_args() def test_SqlCommandStore_isEmpty(self) -> None: command_store = SqlCommandStore(':memory:') self.assertEqual(0, command_store.get_num_commands()) def test_search_commands_whenTermIsDifferentCase_shouldNotReturn(self) -> None: store = SqlCommandStore(':memory:') store.add_command(Command('Add')) matches = store.search_commands(["add"]) self.assertEqual(0, len(matches)) matches = store.search_commands(["Add"]) self.assertEqual(1, len(matches)) def test_search_commands_whenHasContext_shouldCorrectlyAddContext(self) -> None: store = SqlCommandStore(':memory:') store.add_command(Command(command_str='Add', directory_context='directory/path')) matches = store.search_commands(["Add"]) self.assertEqual(1, len(matches)) def test_store_add_whenCommandHasContext_shouldInsertWithContext(self) -> None: command_store = SqlCommandStore(':memory:') self.assertEqual(0, command_store.get_num_commands()) command_str = "some command string" context_path = 'some/directory/context' command1 = Command(command_str, 10.0, 1, 'command info', context_path) command_str2 = "some second command" command2 = Command(command_str2, 10.0, 1, 'command info', context_path) command_store.add_command(command1) command_store.add_command(command2) self.assertEqual(2, command_store.get_num_commands()) def test_get_commands_from_context_whenContextQueried_shouldReturn2Commands(self) -> None: command_store = SqlCommandStore(':memory:') self.assertEqual(0, command_store.get_num_commands()) command_str = "some command string" context_path = 'some/directory/context' command1 = Command(command_str, 11.0, 1, 'command info 1', context_path) command_str2 = "some second command" command2 = Command(command_str2, 12.0, 2, 'command info 2', context_path) command_store.add_command(command1) command_store.add_command(command2) results = command_store.get_command_with_context(context_path, []) self.assertEqual(2, command_store.get_num_commands()) self.assertEqual(2, len(results)) self.assertEqual(command_str2, results[0].get_unique_command_id()) self.assertEqual(command_str, results[1].get_unique_command_id()) self.assertEqual(context_path, results[0].get_directory_context()) self.assertEqual(context_path, results[1].get_directory_context()) def test_get_commands_from_context_whenContextQueried_shouldReturn1Commands(self) -> None: command_store = SqlCommandStore(':memory:') self.assertEqual(0, command_store.get_num_commands()) command_str = "some command string" context_path = 'some/directory/context' context_path2 = 'notqueried/dir' command1 = Command(command_str, 11.0, 1, 'command info 1', context_path) command_str2 = "some second command" command2 = Command(command_str2, 12.0, 2, 'command info 2', context_path2) command_store.add_command(command1) command_store.add_command(command2) results = command_store.get_command_with_context(context_path, []) self.assertEqual(2, command_store.get_num_commands()) self.assertEqual(1, len(results)) self.assertEqual(context_path, results[0].get_directory_context()) def test_add_command_whenSameContextAddedTwice_shouldUpdateTheEntryCount(self) -> None: command_store = SqlCommandStore(':memory:') self.assertEqual(0, command_store.get_num_commands()) command_str = "some command string" context_path = 'some/directory/context' command1 = Command(command_str, 11.0, 1, 'command info 1', context_path) command_store.add_command(command1) command_store.add_command(command1) results = command_store.get_command_with_context(context_path, []) self.assertEqual(1, 
command_store.get_num_commands()) self.assertEqual(1, len(results)) self.assertEqual(context_path, results[0].get_directory_context()) def test_add_commands_whenCommandAdded2Time_shouldReflectInCount(self) -> None: command_store = SqlCommandStore(':memory:') self.assertEqual(0, command_store.get_num_commands()) command_str = "some command string" context_path = 'some/directory/context' context_path2 = 'notqueried/dir' command1 = Command(command_str, 11.0, 1, 'command info 1', context_path) command_str2 = "some second command" command2 = Command(command_str2, 12.0, 2, 'command info 2', context_path2) command_store.add_command(command1) command_store.add_command(command2) results = command_store.get_command_with_context(context_path, []) self.assertEqual(2, command_store.get_num_commands()) self.assertEqual(1, len(results)) self.assertEqual(context_path, results[0].get_directory_context()) def test_search_commands_sorted(self) -> None: command_store = SqlCommandStore(':memory:') self.assertEqual(0, command_store.get_num_commands()) command_str = "some command string" command = Command(command_str, 10.0, 1) command_store.add_command(command) command_str2 = "somelater command string" command2 = Command(command_str2, 20.0, 1) command_store.add_command(command2) result = command_store.search_commands(["some"], starts_with=False, sort=True) self.assertEqual(result[0].get_unique_command_id(), command2.get_unique_command_id()) self.assertEqual(result[1].get_unique_command_id(), command.get_unique_command_id()) def test_addCommandToSqlStore_whenAddingCommand_shouldBeInStore(self) -> None: command_store = SqlCommandStore(':memory:') self.assertEqual(0, command_store.get_num_commands()) command_str = "some command string" directory_path = 'directory/path' command = Command(command_str, directory_context=directory_path) command_store.add_command(command) self.assertTrue(command_store.has_command(command)) self.assertFalse(command_store.has_command(Command("some other command"))) self.assertEqual(1, command_store.get_num_commands()) def test_addCommand_whenSameCommandAndContext_shouldReturnAppropriateCount(self) -> None: store = SqlCommandStore(':memory:') self.assertEqual(0, store.get_num_commands()) command_str = "some command string" directory_path = 'directory/path' command = Command(command_str, directory_context=directory_path) store.add_command(command) store.add_command(command) store.add_command(command) store.add_command(command) self.assertTrue(store.has_command(command)) self.assertEqual(1, store.get_num_commands()) commands = store.get_command_with_context(directory_path, []) self.assertEqual(1, len(commands)) result_command = commands[0] self.assertEqual(4, result_command.get_count_seen()) def test_addCommand_whenSameCommandButContextChanges_shouldReturnAppropriateCountof3( self) -> None: store = SqlCommandStore(':memory:') self.assertEqual(0, store.get_num_commands()) command_str = "some command string" directory_path = 'directory/path' command = Command(command_str, directory_context=directory_path) command_diff_context = Command(command_str, directory_context='differnt/context') store.add_command(command) store.add_command(command) store.add_command(command) store.add_command(command_diff_context) self.assertTrue(store.has_command(command)) self.assertEqual(1, store.get_num_commands()) commands = store.get_command_with_context(directory_path, []) self.assertEqual(1, len(commands)) result_command = commands[0] self.assertEqual(3, result_command.get_count_seen()) def 
test_addCommand_when2Commands_shouldReturnAppropriateTimeOrder( self) -> None: store = SqlCommandStore(':memory:') self.assertEqual(0, store.get_num_commands()) command_str1 = "some command string" directory_path1 = 'directory/path' command1 = Command(command_str1, last_used=1, directory_context=directory_path1) command_str2 = "some different command" command2 = Command(command_str2, last_used=2, directory_context=directory_path1) store.add_command(command1) store.add_command(command2) commands = store.get_command_with_context(directory_path1, []) self.assertEqual(2, len(commands)) result_command = commands[0] # Newer on first self.assertEqual(command2.get_unique_command_id(), result_command.get_unique_command_id()) def test_addCommand_whenCommandsDeleted_shouldNotShowupInResultsforDirSearch(self) -> None: store = SqlCommandStore(':memory:') self.assertEqual(0, store.get_num_commands()) command_str1 = "some command string" directory_path1 = 'directory/path' command1 = Command(command_str1, last_used=1, directory_context=directory_path1) command_str2 = "some different command" command2 = Command(command_str2, last_used=2, directory_context=directory_path1) store.add_command(command1) store.add_command(command2) commands = store.get_command_with_context(directory_path1, []) self.assertEqual(2, len(commands)) store.delete_command(command_str1) commands = store.get_command_with_context(directory_path1, []) self.assertEqual(1, len(commands)) self.assertEqual(commands[0].get_unique_command_id(), command2.get_unique_command_id()) store.add_command(command1) store.delete_command(command_str2) commands = store.get_command_with_context(directory_path1, []) self.assertEqual(1, len(commands)) self.assertEqual(commands[0].get_unique_command_id(), command1.get_unique_command_id()) self.assertEqual(commands[0].get_count_seen(), 1) def test_addCommand_when2CommandsAndQueryParams_shouldReturnAppropriateMatchingQuery( self) -> None: store = SqlCommandStore(':memory:') self.assertEqual(0, store.get_num_commands()) command_str1 = "some command string" directory_path1 = 'directory/path' command1 = Command(command_str1, last_used=1, directory_context=directory_path1) command_str2 = "some different command" command2 = Command(command_str2, last_used=2, directory_context=directory_path1) store.add_command(command1) store.add_command(command2) commands = store.get_command_with_context(directory_path1, ['different']) self.assertEqual(1, len(commands)) result_command = commands[0] # Newer on first self.assertEqual(command2.get_unique_command_id(), result_command.get_unique_command_id()) def test_addCommand_whenMultipleCommandsAndQueryParams_shouldReturnAppropriateMatchingQuery( self) -> None: store = SqlCommandStore(':memory:') self.assertEqual(0, store.get_num_commands()) command_str1 = "some command string" directory_path1 = 'directory/path' command1 = Command(command_str1, last_used=1, directory_context=directory_path1) command_str2 = "some different command" command_str3 = "different command" command2 = Command(command_str2, last_used=2, directory_context=directory_path1) command3 = Command(command_str3, last_used=3, directory_context=directory_path1) store.add_command(command1) store.add_command(command2) store.add_command(command3) commands = store.get_command_with_context(directory_path1, ['different']) self.assertEqual(2, len(commands)) result_command = commands[0] # Newer on first self.assertEqual(command3.get_unique_command_id(), result_command.get_unique_command_id())
import asyncio from telethon.events import ChatAction, NewMessage from telethon.tl.types import MessageEntityMentionName from firebot import CMD_HELP from firebot.function import get_all_admin_chats, is_admin from firebot.modules.sql_helper import gban_sql from firebot.modules.sql_helper.mute_sql import is_muted, mute, unmute from firebot.utils import fire_on_cmd @fire.on(fire_on_cmd(pattern="gban(?: |$)(.*)")) async def gbun(event): await event.edit("**GBanning User**") sucess = 0 bad = 0 user, reason = await get_user_from_event(event) if not user.id: await event.edit("`Mention A User To Gban`") return if not reason: hmm_r = "#GBanned" elif reason: hmm_r = reason if user.id == bot.uid: await event.edit("**I Can't Gban You Master :(**") return if gban_sql.is_gbanned(user.id): await event.edit( "**This User Is Already Gbanned. No Point In Gbanning Him Again !**" ) return gban_sql.gban_user(user.id, hmm_r) chat_s = await get_all_admin_chats(event) if len(chat_s) == 0: await event.edit( "**You Need To Be Admin In Atleast 1 Group To Perform this Action**" ) return len_s = len(chat_s) await event.edit( f"**GBanning !** [{user.first_name}](tg://user?id={user.id}) **in {len_s} Chats!**" ) for stark_s in chat_s: try: await event.client.edit_permissions(stark_s, user.id, view_messages=False) sucess += 1 except: bad += 0 await event.edit( f"**GBanned !**[{user.first_name}](tg://user?id={user.id}) **in {sucess} Chats!**" ) @fire.on(fire_on_cmd(pattern="ungban(?: |$)(.*)")) async def ungbun(event): await event.edit("**Un-GBanning User**") sucess = 0 bad = 0 user, reason = await get_user_from_event(event) if not user.id: await event.edit("`Mention A User To Un-Gban`") return if user.id == bot.uid: await event.edit("**I Can't Un-Gban You Master :(**") return if not gban_sql.is_gbanned(user.id): await event.edit("**This User Is Not Gbanned. 
No Point In Un-Gbanning !**") return gban_sql.ungban_user(user.id) chat_s = await get_all_admin_chats(event) if len(chat_s) == 0: await event.edit( "**You Need To Be Admin In Atleast 1 Group To Perform this Action**" ) return len_s = len(chat_s) await event.edit( f"**Un-GBanning !** [{user.first_name}](tg://user?id={user.id}) **in {len_s} Chats!**" ) for stark_s in chat_s: try: await event.client.edit_permissions(stark_s, user.id, view_messages=True) sucess += 1 except: bad += 0 await event.edit( f"**Un-GBanned !**[{user.first_name}](tg://user?id={user.id}) **in {sucess} Chats!**" ) @fire.on(ChatAction) async def starky(event): if event.user_joined: hmm = await bot.get_me() if await is_admin(event, hmm.id): sadly = await event.get_user() if gban_sql.is_gbanned(sadly.id): try: await event.client.edit_permissions( event.chat_id, sadly.id, view_messages=False ) await event.reply( f"**#GBanned-User** \nUserID : {sadly.id} \nReason : {gban_sql.is_gbanned(sadly.id)}" ) except: pass @fire.on(NewMessage) async def mi(event): sed = event.sender_id hmm = await bot.get_me() if event.is_private: return if await is_admin(event, hmm.id): if gban_sql.is_gbanned(sed): try: await event.client.edit_permissions( event.chat_id, sed, view_messages=False ) except: pass async def get_user_from_event(event): """Get the user from argument or replied message.""" args = event.pattern_match.group(1).split(" ", 1) extra = None if event.reply_to_msg_id: previous_message = await event.get_reply_message() user_obj = await event.client.get_entity(previous_message.sender_id) extra = event.pattern_match.group(1) elif len(args[0]) > 0: user = args[0] if len(args) == 2: extra = args[1] if user.isnumeric(): user = int(user) if not user: await event.edit("`Pass the user's username, id or reply!`") return if event.message.entities is not None: probable_user_mention_entity = event.message.entities[0] if isinstance(probable_user_mention_entity, MessageEntityMentionName): user_id = probable_user_mention_entity.user_id user_obj = await event.client.get_entity(user_id) return user_obj try: user_obj = await event.client.get_entity(user) except (TypeError, ValueError) as err: await event.edit(str(err)) return None elif event.is_private: hmm = await event.get_input_chat() try: user_obj = await event.client.get_entity(hmm) except (TypeError, ValueError) as err: await event.edit(str(err)) return None extra = event.pattern_match.group(1) return user_obj, extra async def get_user_sender_id(user, event): if isinstance(user, str): user = int(user) try: user_obj = await event.client.get_entity(user) except (TypeError, ValueError) as err: await event.edit(str(err)) return None return user_obj @fire.on(fire_on_cmd(pattern=r"gmute ?(\d+)?")) async def startgmute(event): private = False if event.fwd_from: return elif event.is_private: await event.edit("Unexpected issues or ugly errors may occur!") await asyncio.sleep(3) private = True reply = await event.get_reply_message() if event.pattern_match.group(1) is not None: userid = event.pattern_match.group(1) elif reply is not None: userid = reply.sender_id elif private is True: userid = event.chat_id else: return await event.edit( "Please reply to a user or add their into the command to gmute them." 
) event.chat_id await event.get_chat() if is_muted(userid, "gmute"): return await event.edit("`He has Tap Already On His Mouth.`") try: mute(userid, "gmute") except Exception as e: await event.edit("Error occured!\nError is " + str(e)) else: await event.edit("Here A Tape, Now Shutup \nGmuteD") @fire.on(fire_on_cmd(pattern=r"ungmute ?(\d+)?")) async def endgmute(event): private = False if event.fwd_from: return elif event.is_private: await event.edit("Unexpected issues or ugly errors may occur!") await asyncio.sleep(3) private = True reply = await event.get_reply_message() if event.pattern_match.group(1) is not None: userid = event.pattern_match.group(1) elif reply is not None: userid = reply.sender_id elif private is True: userid = event.chat_id else: return await event.edit( "Please reply to a user or add their into the command to ungmute them." ) event.chat_id if not is_muted(userid, "gmute"): return await event.edit("This user is not gmuted") try: unmute(userid, "gmute") except Exception as e: await event.edit("Error occured!\nError is " + str(e)) else: await event.edit("Successfully ungmuted that person") @command(incoming=True) async def watcher(event): if is_muted(event.sender_id, "gmute"): await event.delete() CMD_HELP.update( { "gtools": "**Global Tools**\ \n\n**Syntax : **`.gmute <replying to user message>`\ \n**Usage :** Gmute User And Delete His Msg.\ \n\n**Syntax : **`.ungmute <replying to user message>`\ \n**Usage :** UnGmute User And Stops Deleting His Msgs.\ \n\n**Syntax : **`.gban <replying to user message>`\ \n**Usage :** Gban User And Blow Him From Your Groups\ \n\n**Syntax : **`.ungban <replying to user message>`\ \n**Usage :** Ugban User." } )
from hashlib import sha1 from Crypto import Random from flag import FLAG class PrivateSigningKey: def __init__(self): self.gen = 0x44120dc98545c6d3d81bfc7898983e7b7f6ac8e08d3943af0be7f5d52264abb3775a905e003151ed0631376165b65c8ef72d0b6880da7e4b5e7b833377bb50fde65846426a5bfdc182673b6b2504ebfe0d6bca36338b3a3be334689c1afb17869baeb2b0380351b61555df31f0cda3445bba4023be72a494588d640a9da7bd16L self.q = 0x926c99d24bd4d5b47adb75bd9933de8be5932f4bL self.p = 0x80000000000001cda6f403d8a752a4e7976173ebfcd2acf69a29f4bada1ca3178b56131c2c1f00cf7875a2e7c497b10fea66b26436e40b7b73952081319e26603810a558f871d6d256fddbec5933b77fa7d1d0d75267dcae1f24ea7cc57b3a30f8ea09310772440f016c13e08b56b1196a687d6a5e5de864068f3fd936a361c5L self.key = int(FLAG.encode("hex"), 16) def sign(self, m): def bytes_to_long(b): return long(b.encode("hex"), 16) h = bytes_to_long(sha1(m).digest()) u = bytes_to_long(Random.new().read(20)) assert(bytes_to_long(m) % (self.q - 1) != 0) k = pow(self.gen, u * bytes_to_long(m), self.q) r = pow(self.gen, k, self.p) % self.q s = pow(k, self.q - 2, self.q) * (h + self.key * r) % self.q assert(s != 0) return r, s
import os import pyttsx3 import webbrowser import speech_recognition as sr def get(): r = sr.Recognizer() with sr.Microphone() as source: print("How can I help you? ") audio = r.listen(source) print("done!") try: ch = r.recognize_google(audio).lower() print("You said : "+ ch) return ch except Exception as e: print(e) print("Hello Keerthana Pravallika ! , This is your Personal Assistant \'Rosie\'") pyttsx3.speak("Hello Keerthana pravallika , This is your Personal Assistant Rosie") print("If you want to stop this program just enter \'stop\'") pyttsx3.speak("How can I help you ") ch = get() while(True): if(("notepad" in ch or "editor" in ch or "text editor" in ch or "write" in ch ) and ("not" not in ch)): pyttsx3.speak("Opening Notepad") os.system("notepad") elif(("browse" in ch or "google" in ch or "search" in ch or "chrome" in ch or "net" in ch or "internet" in ch) and ("not" not in ch)): pyttsx3.speak("Opening Chrome") os.system("chrome") elif(("message" in ch or "whatsapp" in ch or "text" in ch or "ping" in ch or "call" in ch) and ("not" not in ch)): pyttsx3.speak("Opening Whatsapp") webbrowser.open("whatsapp.com") elif(("youtube" in ch or "you tube" in ch) and ("not" not in ch) ): pyttsx3.speak("Opening you tube") webbrowser.open("youtube.com") elif("twitter" in ch and ("not" not in ch)): pyttsx3.speak("Opening Twitter") webbrowser.open("twitter.com") elif("facebook" in ch and ("not" not in ch)): pyttsx3.speak("Opening Facebook") webbrowser.open("facebook.com") elif("linkedin" in ch and ("not" not in ch)): pyttsx3.speak("Opening Linkedin") webbrowser.open("linkedin.com") elif("mail" in ch and ("not" not in ch)): pyttsx3.speak("Opening Gmail") webbrowser.open("gmail.com") elif(("media" in ch or "player" in ch or "wmplayer" in ch ) and ("not" not in ch)): pyttsx3.speak("Opening Windows media player") os.system("wmplayer") elif(("calculate" in ch or "calculator" in ch or "calculations" in ch) and ("not" not in ch)): pyttsx3.speak("Opening Calculator") os.system("calc.exe") elif(("paint" in ch or "draw" in ch or "sketch" in ch) and ("not" not in ch)): pyttsx3.speak("Opening Paint") os.system("mspaint.exe") elif("thank" in ch ): pyttsx3.speak("You are welcome") print("You are welcome :)") elif("help" in ch): pyttsx3.speak("I can open Notepad , Google , Windows Media player , You tube , Gmail , Linkedin , Facebook , Twitter , Calculator , Paint and Whatsapp") print("Notepad") print("Google") print("Windows Media Player") print("You Tube") print("Gmail") print("Linkedin") print("Facebook") print("Twitter") print("Calculator") print("Paint") print("Whatsapp") elif("do not" in ch): print("Okay!") pyttsx3.speak("Fine") else: pyttsx3.speak("Sorry, I could not understand. Once say help to see whether the command is there or not ") print("Sorry, I could not understand. Once say help to see whether the command is there or not") pyttsx3.speak("How else can I help you") ch = get() if("stop" in ch): pyttsx3.speak("Thank you , Have a nice day") print("Thank you , Have a Nice day !") break
import copy import json import pathlib from typing import Union, List, Dict, Tuple from iiif.iiif_server import IIIFServer SAMPLES_CANVAS = { "@type": "sc:Canvas", "@id": "http://localhost:80/canvas1", "label": "", "width": 100, "height": 100, "images": [ { "@type": "oa:Annotation", "motivation": "sc:painting", "on": "http://localhost:80/canvas1", "resource": { "@type": "dctypes:Image", "@id": "http://localhost/fcgi-bin/iipsrv.fcgi?IIIF=PalaisDuLouvre.tif/full/200,/0/default.jpg", "service": { "@context": "http://iiif.io/api/image/2/context.json", "@id": "http://localhost:80/fcgi-bin/iipsrv.fcgi?IIIF=PalaisDuLouvre.tif", "profile": "http://iiif.io/api/image/2/level2.json" } } } ] } SAMPLE_MANIFEST = { "@context": "http://iiif.io/api/presentation/2/context.json", "@type": "sc:Manifest", "@id": "http://localhost:80/manifests/manifest.json", "label": "", "description": "", "attribution": "", "sequences": [ { "@type": "sc:Sequence", "canvases": [ ] } ] } ATTRIBUTION, DESCRIPTION, LABEL = "attribution", "description", "label" class CanvasBuilder: def __init__(self, canvas_id: pathlib.Path, image: pathlib.Path, server: IIIFServer): self._canvas_dict = copy.deepcopy(SAMPLES_CANVAS) self._server = server assert image.is_file() self._canvas_dict["@id"] = self._image_dict()["on"] = self._server.url_to(canvas_id) suffix = "/full/100,/0/default.jpg" self._image_dict()["resource"]["@id"] = self._server.image_path_to_url(image) + suffix self._set_service_value("@id", self._server.image_path_to_url(image)) def _image_dict(self): return self._canvas_dict["images"][0] def set_service_context(self, url: str): self._set_service_value("@context", url) def set_service_profile(self, url: str): self._set_service_value("profile", url) def _set_service_value(self, key: str, url: str): self._image_dict()["resource"]["service"][key] = url def build(self): return copy.deepcopy(self._canvas_dict) def set_label(self, label: str): self._canvas_dict[LABEL] = label class ManifestBuilder: def __init__(self, server: IIIFServer, location: pathlib.Path): self._server = server self._manifest_dict = copy.deepcopy(SAMPLE_MANIFEST) self._location = location assert location.suffix == ".json", f"json suffix Expected, actual={location.suffix}" self._manifest_dict["@id"] = self._server.path_to_url(location) self._canvases_ids = set() def save(self): manifest = self.build() self._location.parent.mkdir(parents=True, exist_ok=True) with self._location.open("w") as f: json.dump(manifest, f, indent=4, sort_keys=True) def __setitem__(self, key: str, value: str): assert key in [ATTRIBUTION, DESCRIPTION, LABEL] self._manifest_dict[key] = value def build(self): return copy.deepcopy(self._manifest_dict) def add_canvas(self, canvas): if canvas["@id"] in self._canvases_ids: return assert canvas["@id"] not in self._canvases_ids self._canvases_ids.add(canvas["@id"]) self._manifest_dict["sequences"][0]["canvases"].append(canvas) def canvas_count(self): return len(self._canvases_ids) def set_local_context_path(self, context_path: pathlib.Path): assert context_path.suffix == ".json", f"json suffix expected, actual={context_path.suffix}" if not context_path.is_file(): raise FileNotFoundError self._manifest_dict["@context"] = self._server.path_to_url(context_path) def get(obj: Union[List, Dict], key: Tuple): current = obj for k in key: current = current[k] return current def is_reachable(url): try: import requests response = requests.get(url) return response.ok except: return True def validate_many_reachable(obj, keys): for url in [get(obj, k) for k in 
keys]: assert is_reachable(url) def validate_manifest_files_are_reachable(manifest_dict): validate_many_reachable(manifest_dict, [("@id",), ("@context",)]) for canvas in manifest_dict["sequences"][0]["canvases"]: image = canvas["images"][0] validate_many_reachable(image, [("resource", "@id",), ("resource", "service", "profile"), ("resource", "service", "@id"), ("resource", "service", "@context"), ])
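# --- Usage sketch (illustrative only) -----------------------------------------
# How the two builders above are meant to compose: build one canvas per image
# and collect the canvases into a manifest. `server` is assumed to be an
# already-configured IIIFServer, and the image/manifest paths are hypothetical;
# note that CanvasBuilder asserts the image file exists on disk.
def build_example_manifest(server: IIIFServer) -> None:
    image = pathlib.Path("images/PalaisDuLouvre.tif")         # must exist on disk
    canvas = CanvasBuilder(pathlib.Path("canvas1"), image, server)
    canvas.set_label("Palais du Louvre")

    manifest = ManifestBuilder(server, pathlib.Path("manifests/example.json"))
    manifest[LABEL] = "Example manifest"
    manifest[DESCRIPTION] = "Built with CanvasBuilder/ManifestBuilder"
    manifest.add_canvas(canvas.build())
    manifest.save()                                            # writes the JSON file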
from functools import wraps from flask import Flask, request, jsonify import uuid app = Flask(__name__) def _mock_get_things_from_db(id=None): things = [{'id': '2ce41c1a-095e-4e1f-8c08-59bfd89fa806', 'name': 'thing1'}, {'id': 'c1a68009-97f0-4ca9-94fd-c300b63314b1', 'name': 'thing2'}] if id: thing = [t for t in things if t['id'] == id] return thing[0] if thing else None return things def handle_errors(f, is_json_response=True): @wraps(f) def decorated(*args, **kwargs): response = {} try: response = f(*args, **kwargs) except Exception as e: print(str(e)) response = { 'body': { 'message': str(e) }, 'status_code': 500 } return jsonify(response['body']), \ response['status_code'], \ response.get('headers', {}) return decorated @app.route('/health', methods=['GET']) @handle_errors def health(): return { 'status_code': 200, 'body': { 'status': 'success' } } @app.route('/things', methods=['GET']) @handle_errors def get_things(): return { 'status_code': 200, 'body': _mock_get_things_from_db() } @app.route('/things', methods=['POST']) @handle_errors def post_things(): if not request.get_json().get('name'): return { 'status_code': 400, 'headers': {}, 'body': {'message': 'Missing key: name.'} } return { 'status_code': 201, 'headers': {}, 'body': {'id': str(uuid.uuid4()), 'name': request.get_json()['name']} } @app.route('/things/<thing_id>', methods=['GET']) @handle_errors def get_thing(thing_id): thing = _mock_get_things_from_db(id=thing_id) if not thing: return { 'status_code': 404, 'body': { 'message': 'Thing not found.'} } return { 'status_code': 200, 'headers': {}, 'body': {'id': thing['id'], 'name': thing['name']} } if __name__ == '__main__': app.run(host='0.0.0.0', debug=True, port=80)
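# --- Quick check with Flask's built-in test client (illustrative sketch) ------
# Exercises the routes above without starting a server; the expected response
# shapes follow directly from the handlers defined in this module.
def _smoke_test():
    client = app.test_client()
    assert client.get('/health').get_json() == {'status': 'success'}
    assert len(client.get('/things').get_json()) == 2
    created = client.post('/things', json={'name': 'thing3'})
    assert created.status_code == 201 and created.get_json()['name'] == 'thing3'
    assert client.get('/things/unknown-id').status_code == 404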
# Counter for the number of possibilities
# (starts at 1 because a single 200p coin is one way)
counter = 1

# Iterate over every combination of the remaining coin denominations; the 1p
# coins always fill whatever is left, so each combination counts exactly once.
for a in range(3):  # 100p
    for b in range(int(1+(200-100*a)/50)):  # 50p
        for c in range(int(1+(200-100*a-50*b)/20)):  # 20p
            for d in range(int(1+(200-100*a-50*b-20*c)/10)):  # 10p
                for e in range(int(1+(200-100*a-50*b-20*c-10*d)/5)):  # 5p
                    for f in range(int(1+(200-100*a-50*b-20*c-10*d-5*e)/2)):  # 2p
                        counter += 1

# Total number of ways we can form the 200p
print(counter)
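# --- Cross-check (illustrative): the same count via a standard coin-change DP.
# ways[v] counts the combinations that make v pence from the full coin set,
# including the 200p coin itself, so count_ways() should match `counter` above.
def count_ways(target=200, coins=(1, 2, 5, 10, 20, 50, 100, 200)):
    ways = [1] + [0] * target
    for coin in coins:
        for value in range(coin, target + 1):
            ways[value] += ways[value - coin]
    return ways[target]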
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Checkpoint compatibility functions with SaveableObject.

Compatibility methods to ensure that checkpoints are saved with the same
metadata attributes before/after the SaveableObject deprecation.
"""

_LEGACY_SAVEABLE_NAME = "_LEGACY_SAVEABLE_NAME"


def legacy_saveable_name(name):
  """Decorator to set the local name to use in the Checkpoint.

  Needed for migrating certain Trackables from the legacy
  `_gather_saveables_for_checkpoint` to the new `_serialize_to_tensors`
  function.

  This decorator should be used if the SaveableObject generates tensors with
  different names from the name that is passed to the factory.

  Args:
    name: String name of the SaveableObject factory (the key returned in the
      `_gather_saveables_for_checkpoint` function)

  Returns:
    A decorator.
  """
  def decorator(serialize_to_tensors_fn):
    setattr(serialize_to_tensors_fn, _LEGACY_SAVEABLE_NAME, name)
    return serialize_to_tensors_fn
  return decorator


def get_saveable_name(obj):
  # pylint: disable=protected-access
  obj_serialize_fn = obj._serialize_to_tensors
  if hasattr(obj_serialize_fn, "__func__"):
    obj_serialize_fn = obj_serialize_fn.__func__
  return getattr(obj_serialize_fn, _LEGACY_SAVEABLE_NAME, None)
  # pylint: enable=protected-access
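# --- Usage sketch (illustrative; `_FakeTrackable` is hypothetical) ------------
# A class migrating from the legacy `_gather_saveables_for_checkpoint` factory
# to `_serialize_to_tensors` keeps its old checkpoint key by decorating the new
# method with the legacy factory name; `get_saveable_name` then recovers it.
class _FakeTrackable:

  @legacy_saveable_name("table")
  def _serialize_to_tensors(self):
    return {"-keys": [], "-values": []}  # placeholder values, not real tensors


assert get_saveable_name(_FakeTrackable()) == "table"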
# coding= utf-8 from __future__ import unicode_literals import pytest import forth class TestTheParser(): def test_empty_string(self): """ Parser refuses to parse past the end of the string. """ p = forth.Parser('') with pytest.raises(StopIteration): p.parse_whitespace() with pytest.raises(StopIteration): p.parse_word() with pytest.raises(StopIteration): p.parse_rest_of_line() with pytest.raises(StopIteration): p.next_word() assert p.is_finished == True def test_all_whitespace(self): """ Parser consumes all whitespace in one gulp. """ whitespace_string = " \t\t \t \t" p = forth.Parser(whitespace_string) assert p.parse_word() is None assert p.parse_whitespace() == whitespace_string with pytest.raises(StopIteration): p.next_word() assert p.is_finished == True # Also, next_word will happily consume and ignore the whitespace itself. p = forth.Parser(whitespace_string) with pytest.raises(StopIteration): p.next_word() assert p.is_finished == True def test_single_word(self): """ A single word is returned immediately. """ p = forth.Parser("JUST-ONE-WORD") assert p.next_word() == "JUST-ONE-WORD" # no further words exist with pytest.raises(StopIteration): p.next_word() assert p.is_finished == True def test_leading_whitespace(self): """ Leading whitespace is ignored. """ p = forth.Parser(" \t HELLO-WORLD") assert p.next_word() == 'HELLO-WORLD' # no further words exist with pytest.raises(StopIteration): p.next_word() assert p.is_finished == True def test_more_words(self): """ Multiple words are returned one at a time. """ p = forth.Parser("AND ON THE PEDESTAL,") assert p.next_word() == 'AND' assert p.next_word() == 'ON' assert p.next_word() == 'THE' assert p.next_word() == 'PEDESTAL,' with pytest.raises(StopIteration): p.next_word() assert p.is_finished == True def test_more_whitespace(self): """ All whitespace is eaten together and has no effect on words. """ p = forth.Parser(" \tTHESE\t\tWORDS APPEAR \t ") assert p.next_word() == 'THESE' assert p.next_word() == 'WORDS' assert p.next_word() == 'APPEAR' with pytest.raises(StopIteration): p.next_word() assert p.is_finished == True def test_newlines(self): """ Newlines get consumed like other whitespace """ p = forth.Parser("MY NAME IS OZYMANDIAS,\nKING OF KINGS!") assert p.next_word() == 'MY' assert p.next_word() == 'NAME' assert p.next_word() == 'IS' assert p.next_word() == 'OZYMANDIAS,' assert p.next_word() == 'KING' assert p.next_word() == 'OF' assert p.next_word() == 'KINGS!' with pytest.raises(StopIteration): p.next_word() assert p.is_finished == True
# Copyright 2017 British Broadcasting Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gevent from nmoscommon.logger import Logger INTERVAL = 10 TIMEOUT = 9 LOCK_TIMEOUT = 15 class TooLong(Exception): pass class GarbageCollect(object): parent_tab = { 'devices': [('nodes', 'node_id')], 'senders': [('devices', 'device_id')], 'receivers': [('devices', 'device_id')], 'sources': [('devices', 'device_id')], 'flows': [('devices', 'device_id'), ('sources', 'source_id')] } def __init__(self, registry, identifier, logger=None, interval=INTERVAL): """ interval Number of seconds between checks / collections. An interval of '0' means 'never check'. """ self.registry = registry self.logger = Logger("garbage_collect", logger) self.identifier = identifier if interval > 0: gevent.spawn_later(interval, self.garbage_collect) def garbage_collect(self): # Check to see if garbage collection hasn't been done recently (by another aggregator) # Uses ETCD's prevExist=false function # See https://github.com/coreos/etcd/blob/master/Documentation/api.md#atomic-compare-and-swap try: flag = self.registry.put_garbage_collection_flag(host=self.identifier, ttl=LOCK_TIMEOUT) if flag.status_code != 201: self.logger.writeDebug("Not collecting - another collector has recently collected") return # Kick off a collection with a specified timeout. try: with gevent.Timeout(TIMEOUT, TooLong): self._collect() finally: self.logger.writeDebug("remove flag") self._remove_flag() except Exception as e: self.logger.writeError("Could not write garbage collect flag: {}".format(e)) finally: # Always schedule another gevent.spawn_later(INTERVAL, self.garbage_collect) self.logger.writeDebug("scheduled...") def _collect(self): try: self.logger.writeDebug("Collecting: {}".format(self.identifier)) # create list of nodes still alive alive_nodes = [] health_dict = self.registry.get_healths() for h in health_dict.get('/health', {}).keys(): node_name = h.split('/')[-1] alive_nodes.append(node_name) # TODO: GETs... maybe getting the whole response in one go is better? # Maybe doing these async is a good idea? For now, this suffices. all_types = ["nodes", "devices", "senders", "receivers", "sources", "flows"] resources = {rtype: self.registry.get_all(rtype) for rtype in all_types} # Get a flat list of (type, resource) pairs for existing resources # TODO: combine with above all_resources = [] for res_type, res in resources.items(): all_resources += [(res_type, x) for x in res] # Initialise the removal queue with any dead nodes nodes = [x.strip('/') for x in self.registry.getresources("nodes")] # TODO: already have this above... kill_q = [('nodes', node_id) for node_id in nodes if node_id not in alive_nodes] # Create a list of (type, id) pairs of resources that should be removed. to_kill = [] # Find orphaned resources kill_q += self.__find_dead_resources(all_resources, to_kill) # Process the removal queue. 
while kill_q: gevent.sleep(0.0) # Add these resources to the list of removals to_kill += kill_q # Reduce search space; this resource can never parent another # This proves to be faster in the long run. all_resources = [x for x in all_resources if (x[0], x[1]['id']) not in to_kill] # Look through remaining resources and get a new kill_q kill_q = self.__find_dead_resources(all_resources, to_kill) for resource_type, resource_id in to_kill: self.logger.writeInfo("removing resource: {}/{}".format(resource_type, resource_id)) self.registry.delete(resource_type, resource_id) except self.registry.RegistryUnavailable: self.logger.writeWarning("registry unavailable") except TooLong: self.logger.writeWarning("took too long") except Exception as e: self.logger.writeError("unhandled exception: {}".format(e)) def __find_dead_resources(self, all_resources, to_kill): def is_alive(parent_def): if parent_def in to_kill: return False parent_type, parent_id = parent_def found_parent = next((x for x in all_resources if x[0] == parent_type and x[1]['id'] == parent_id), None) return found_parent is not None # Build a list of resource to remove kill_q = [] # Look through all remaining resources for child_type, child in all_resources: # We need never consider nodes; they should have already been marked. if child_type == "nodes": continue child_id = child['id'] # Get parent for child. There is only ever one; anything with multiple # parent entries in the parent table has multiple entries for backward # compatibility, in order strongest->weakest. parents = [ (parent_type, child.get(parent_key)) for parent_type, parent_key in self.parent_tab.get(child_type, (None, None)) ] parent = next((x for x in parents if x[1] is not None), None) if parent is None or not is_alive(parent): kill_q.append((child_type, child_id)) return kill_q def _remove_flag(self): try: self.registry.delete_raw("garbage_collection") except Exception as e: self.logger.writeWarning("Could not remove flag: {}".format(e))
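# --- Usage sketch (illustrative only) ------------------------------------------
# GarbageCollect only needs a registry object exposing the handful of methods
# used above. The fake registry below is hypothetical and minimal: one dead node
# with an orphaned device, which a manually driven collection then removes.
# Passing interval=0 disables the periodic gevent scheduling.
class _FakeFlag:
    status_code = 201


class _FakeRegistry:
    class RegistryUnavailable(Exception):
        pass

    def __init__(self):
        self.deleted = []

    def put_garbage_collection_flag(self, host, ttl):
        return _FakeFlag()

    def get_healths(self):
        return {'/health': {}}                      # no nodes report as alive

    def get_all(self, rtype):
        if rtype == 'nodes':
            return [{'id': 'node-1'}]
        if rtype == 'devices':
            return [{'id': 'dev-1', 'node_id': 'node-1'}]
        return []

    def getresources(self, rtype):
        return ['/node-1'] if rtype == 'nodes' else []

    def delete(self, rtype, rid):
        self.deleted.append((rtype, rid))

    def delete_raw(self, key):
        pass


if __name__ == "__main__":
    registry = _FakeRegistry()
    collector = GarbageCollect(registry, "example-aggregator", interval=0)
    collector.garbage_collect()                     # interval=0, so drive it manually
    print(registry.deleted)                         # [('nodes', 'node-1'), ('devices', 'dev-1')]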
from functools import reduce from select import select from threading import Thread import approxeng.input.sys as sys from approxeng.input.controllers import * EV_KEY = 1 EV_REL = 2 EV_ABS = 3 class ControllerResource: """ General resource which binds one or more controllers on entry and unbinds the event listening thread on exit. """ def __init__(self, *requirements, print_events=False, **kwargs): """ Create a new resource to bind and access one or more controllers. If no additional arguments are supplied this will find the first controller of any kind enabled by the library. Otherwise the requirements must be provided as a list of ControllerRequirement :param ControllerRequirement requirements: ControllerRequirement instances used, in order, to find and bind controllers. If empty this will be equivalent to supplying a single unfiltered requirement and will match the first specified controller. :param bool print_events: Defaults to False, if set to True then all events picked up by the binder will be printed to stdout. Use this when you're trying to figure out what events correspond to what axes and buttons! :param kwargs: Any addition keyword arguments are passed to the constructors for the controller classes. This is useful particularly to specify e.g. dead and hot zone ranges on discovery. :raises ControllerNotFoundError: If the requirement can't be satisfied, or no requirements are specified but there aren't any controllers. """ self.discoveries = find_matching_controllers(*requirements, **kwargs) self.unbind = None self.print_events = print_events def __enter__(self): """ Called on entering the resource block, returns the controller passed into the constructor. """ self.unbind = bind_controllers(*self.discoveries, print_events=self.print_events) if len(self.discoveries) == 1: return self.discoveries[0].controller else: return tuple(discovery.controller for discovery in self.discoveries) def __exit__(self, exc_type, exc_value, traceback): """ Called on resource exit, unbinds the controller, removing the listening thread. """ self.unbind() def bind_controllers(*discoveries, print_events=False): """ Bind a controller or controllers to a set of evdev InputDevice instances, starting a thread to keep those controllers in sync with the state of the hardware. 
:param ControllerDiscovery discoveries: ControllerDiscovery instances specifying the controllers and their associated input devices :param bool print_events: Defaults to False, if set to True then all events picked up by this binder will be printed to stdout :return: A function which can be used to stop the event reading thread and unbind from the device """ discoveries = list(discoveries) class SelectThread(Thread): def __init__(self): Thread.__init__(self, name='evdev select thread') self.daemon = True self.running = True self.device_to_controller_discovery = {} for discovery in discoveries: for d in discovery.devices: self.device_to_controller_discovery[d.fn] = discovery self.all_devices = reduce(lambda x, y: x + y, [discovery.devices for discovery in discoveries]) def run(self): for discovery in discoveries: discovery.controller.device_unique_name = discovery.name while self.running: try: r, w, x = select(self.all_devices, [], [], 0.5) for fd in r: active_device = fd controller_discovery = self.device_to_controller_discovery[active_device.fn] controller = controller_discovery.controller controller_devices = controller_discovery.devices prefix = None if controller.node_mappings is not None and len(controller_devices) > 1: try: prefix = controller.node_mappings[active_device.name] except KeyError: pass for event in active_device.read(): if print_events: print(event) if event.type == EV_ABS or event.type == EV_REL: controller.axes.axis_updated(event, prefix=prefix) elif event.type == EV_KEY: # Button event if event.value == 1: # Button down controller.buttons.button_pressed(event.code, prefix=prefix) elif event.value == 0: # Button up controller.buttons.button_released(event.code, prefix=prefix) except Exception as e: self.stop(e) def stop(self, exception=None): for discovery in discoveries: discovery.controller.device_unique_name = None discovery.controller.exception = exception self.running = False polling_thread = SelectThread() # Force an update of the LED and battery system cache sys.scan_cache(force_update=True) for device in polling_thread.all_devices: device.grab() def unbind(): polling_thread.stop() for dev in polling_thread.all_devices: try: dev.ungrab() except IOError: pass polling_thread.start() return unbind
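# --- Hedged usage sketch (added; not part of the original module) ---
# Minimal loop driving ControllerResource as a context manager. The 'connected'
# attribute and the 'lx' axis name come from the wider approxeng.input controller
# classes and are assumptions here, not defined in this file.
if __name__ == '__main__':
    from time import sleep

    with ControllerResource(print_events=False) as joystick:
        while joystick.connected:
            left_x = joystick['lx']   # read one analogue axis by its standard name
            print('lx =', left_x)
            sleep(0.1)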
from django.core.management.base import BaseCommand, CommandError from aggregator.models import * from django.conf import settings import prestodb import psycopg2 import requests import json from datetime import datetime class Command(BaseCommand): help = 'Collects the datasets/tables on Presto and updates all the metadata' def add_arguments(self, parser): parser.add_argument('--update_old', default=False, help='For already added datasets, just update their ' 'variables and dimensions without creating new ones') def handle(self, *args, **options): # GET JWT for fileHandler response = requests.post(settings.PARSER_LOG_IN_URL, data=json.dumps({"username": settings.PARSER_USERNAME, "password": settings.PARSER_PASSWORD})) FILEHANDLER_JWT = response.headers["Authorization"] # GET JWT for Harmonization headers = {'Content-type': 'application/json'} response = requests.post(settings.HARMONIZATION_AUTH, data=json.dumps({"username": settings.HARMONIZATION_USERNAME, "password": settings.HARMONIZATION_PASSWORD}), headers=headers) self.stdout.write(str(response)) HARMONIZATION_JWT = str(response.json()["access_token"]) # GET THE TABLES FROM DJANGO all_tables_django = [d['table_name'] for d in Dataset.objects.filter(stored_at='UBITECH_PRESTO').values('table_name')] self.stdout.write('tables in django') self.stdout.write(str(all_tables_django)) self.stdout.write(str(len(all_tables_django))) # # GET THE TABLES FROM FILEHANDLER PROFILES # headers = {'Authorization': FILEHANDLER_JWT} # response = requests.get(settings.PROFILES_URL, headers=headers) # profile_list = response.json() # all_tables_profiles = [profile["storageTable"] for profile in profile_list] # self.stdout.write('tables with profiles') # self.stdout.write(str(all_tables_profiles)) # self.stdout.write(str(len(all_tables_profiles))) # GET THE TABLES FROM HARMONIZATION PROFILES headers = {'Authorization': "JWT " + HARMONIZATION_JWT} response = requests.get(settings.HARMONIZATION_DATASET_LIST_URL, headers=headers) profile_list = response.json() all_tables_profiles = [profile["storageTable"] for profile in profile_list] self.stdout.write('tables with profiles') self.stdout.write(str(all_tables_profiles)) self.stdout.write(str(len(all_tables_profiles))) # FIND THE TABLES THAT DO NOT EXIST IN DJANGO tables_to_add = list(set(all_tables_profiles) - set(all_tables_django)) self.stdout.write('tables to add') self.stdout.write(str(tables_to_add)) self.stdout.write(str(len(tables_to_add))) possible_dimensions = ["latitude", "longitude", "time", "platform_id", "depth", "manually_entered_depth", "automatically_measured_latitude", "automatically_measured_longitude", "voyage_number", "trip_identifier", "timestamp", "ship_id", "ship_name", "imo_id", 'imo'] possible_vessel_identifiers = ["platform_id", "platform_id_hash", "ship_id", "ship_name", "imo_id", 'imo', "voyage_number", "trip_identifier"] for i, profile in enumerate(profile_list[:]): print "Profile: " + str(i) if profile["storageTable"] in tables_to_add: dataset = Dataset(title=profile["title"], source=profile["source"], stored_at="UBITECH_PRESTO", table_name=profile["storageTable"], publisher=profile["publisher"]) self.stdout.write('adding dataset '+str(dataset.title)) else: # THIS MAY RETURN MORE THAN ONE DATASETS BECAUSE 1 DATASET CAN BE RELATED TO MANY PROFILES dataset = Dataset.objects.filter(stored_at="UBITECH_PRESTO", table_name=profile["storageTable"]).first() self.stdout.write('modifying dataset ' + str(dataset.title)) basic_info = ['title', 'source', 'storageTable', 'publisher', 
'description', 'spatialEast', 'spatialSouth', 'spatialNorth', 'spatialWest', 'temporalCoverageBegin', 'temporalCoverageEnd', 'license', 'observations'] for field in basic_info: try: self.stdout.write('setting' + str(field)) setattr(dataset, field, profile[field]) dataset.save() except Exception, e: setattr(dataset, field, None) dataset.save() pass if profile["accessRights"] == 'Public': dataset.private = False else: dataset.private = True metadata = {} not_include = basic_info + ["variables", "id", "profileName"] for key in profile.keys(): if key not in not_include: metadata[key] = profile[key] dataset.metadata = metadata dataset.save() ### REMOVE IT # column_list_titles = [var["canonicalName"] for var in profile["variables"]] # dataset_vessel_identifiers = [col for col in column_list_titles if col in possible_vessel_identifiers] ###/ REMOVE IT if profile["storageTable"] in tables_to_add: # if profile["storageTable"] in tables_to_add and len(dataset_vessel_identifiers)>0: dataset_variables = [] dataset_dimensions = [] for var in profile["variables"]: if var["canonicalName"] not in possible_dimensions: dataset_variables.append(var) else: dataset_dimensions.append(var) # this change was made because i.e. in HCMR Aegean Sea Bathymetry, no variable was present if len(dataset_variables) == 0 and 'depth' in [d["canonicalName"] for d in dataset_dimensions]: for d in dataset_dimensions: if d["canonicalName"] == 'depth': dataset_dimensions.remove(d) dataset_variables.append(d) self.stdout.write('variables to add') self.stdout.write(str(dataset_variables)) self.stdout.write('dimensions to add') self.stdout.write(str(dataset_dimensions)) for var in dataset_variables: self.stdout.write('adding '+str(var["canonicalName"])) if var["unit"] is None: var["unit"] = '' # GET one variable info headers = {'Authorization': FILEHANDLER_JWT, 'Content-type': 'application/json'} response = requests.post(settings.VARIABLE_LOOKUP_URL, data=json.dumps([{"name": var["name"], "canonicalName": var["canonicalName"]}]), headers=headers) if len(response.json()) > 0: var_info = response.json()[0] variable = Variable(name=var_info["canonicalName"], title=var_info["title"], original_column_name=var_info["name"], unit=var_info["unit"], description=var_info["description"], sameAs=var_info["sameAs"], dataType=var_info["dataType"], dataset=dataset) else: variable = Variable(name=var["canonicalName"], title=var["name"], original_column_name=var["name"], unit=var["unit"], dataset=dataset) variable.save() for dim in dataset_dimensions: self.stdout.write('adding ' + str(dim["canonicalName"])) if dim["unit"] is None: dim["unit"] = '' # GET one dimension info headers = {'Authorization': FILEHANDLER_JWT, 'Content-type': 'application/json'} response = requests.post(settings.VARIABLE_LOOKUP_URL, data=json.dumps([{"name": dim["name"], "canonicalName": dim["canonicalName"]}]), headers=headers) if len(response.json()) > 0: dim_info = response.json()[0] dimension = Dimension(name=dim_info["canonicalName"], title=dim_info["title"], original_column_name=dim_info["name"], unit=dim_info["unit"], description=dim_info["description"], sameAs=dim_info["sameAs"], dataType=dim_info["dataType"], variable=variable) else: dimension = Dimension(name=dim["canonicalName"], title=dim["name"], original_column_name=dim["name"], unit=dim["unit"], variable=variable) dimension.save() else: if options['update_old']: dataset_variables = [] dataset_dimensions = [] for var in profile["variables"]: if var["canonicalName"] not in possible_dimensions: 
dataset_variables.append(var) else: dataset_dimensions.append(var) for var in dataset_variables: # GET one variable info headers = {'Authorization': FILEHANDLER_JWT, 'Content-type': 'application/json'} response = requests.post(settings.VARIABLE_LOOKUP_URL, data=json.dumps([{"name": var["name"], "canonicalName": var["canonicalName"]}]), headers=headers) if len(response.json()) > 0: var_info = response.json()[0] self.stdout.write('modifying '+str(var_info["canonicalName"])) try: variable = Variable.objects.get(dataset=dataset, name=var_info["canonicalName"]) except Exception, e: variable = Variable(name=var_info["canonicalName"], title=var_info["title"], original_column_name=var_info["name"], unit=var_info["unit"], description=var_info["description"], sameAs=var_info["sameAs"], dataType=var_info["dataType"], dataset=dataset) pass variable.name = var_info["canonicalName"] variable.title = var_info["title"] variable.original_column_name = var_info["name"] variable.unit = var_info["unit"] variable.description = var_info["description"] variable.sameAs = var_info["sameAs"] variable.dataType = var_info["dataType"] variable.save() else: self.stdout.write('modifying ' + str(var["canonicalName"])) try: variable = Variable.objects.get(dataset=dataset, name=var["canonicalName"]) except Exception, e: variable = Variable(name=var["canonicalName"], title=var["name"], original_column_name=var["name"], unit=var["unit"], dataset=dataset) pass variable.name = var["canonicalName"] variable.title = var["name"] variable.original_column_name = var["name"] variable.unit = var["unit"] variable.save() for dim in dataset_dimensions: # GET one dimension info headers = {'Authorization': FILEHANDLER_JWT, 'Content-type': 'application/json'} response = requests.post(settings.VARIABLE_LOOKUP_URL, data=json.dumps([{"name": dim["name"], "canonicalName": dim["canonicalName"]}]), headers=headers) if len(response.json()) > 0: dim_info = response.json()[0] try: dimension = Dimension.objects.get(variable=variable, name=dim_info["canonicalName"]) except Exception, e: dimension = Dimension(name=dim_info["canonicalName"], title=dim_info["title"], original_column_name=dim_info["name"], unit=dim_info["unit"], description=dim_info["description"], sameAs=dim_info["sameAs"], dataType=dim_info["dataType"], variable=variable) pass dimension.name = dim_info["canonicalName"] dimension.title = dim_info["title"] dimension.original_column_name = dim_info["name"] dimension.unit = dim_info["unit"] dimension.description = dim_info["description"] dimension.sameAs = dim_info["sameAs"] dimension.dataType = dim_info["dataType"] dimension.save() else: try: dimension = Dimension.objects.get(variable=variable, name=dim["canonicalName"]) except Exception, e: dimension = Dimension(name=dim["canonicalName"], title=dim["name"], original_column_name=dim["name"], unit=dim["unit"], variable=variable) pass dimension.name = dim["canonicalName"] dimension.title = dim["name"] dimension.original_column_name = dim["name"] dimension.unit = dim["unit"] dimension.save() headers = {'Authorization': FILEHANDLER_JWT, 'Content-type': 'application/json'} response = requests.get(settings.PARSER_URL + '/fileHandler/table/' + dataset.table_name + '/lastUpdate', headers=headers) if response.content == '': dataset.last_updated = None dataset.save() else: dataset.last_updated = datetime.strptime(response.content, '%Y-%m-%dT%H:%M:%S.%f') dataset.save() if profile["storageTable"] in tables_to_add or options['update_old']: # if 1 == 1: rows_to_render = [] variable_list_canonical = 
[v.safe_name for v in Variable.objects.filter(dataset=dataset)] variable_list_titles = [v.title for v in Variable.objects.filter(dataset=dataset)] variable_list_units = [v.unit for v in Variable.objects.filter(dataset=dataset)] dimension_list_canonical = [d.name for d in Dimension.objects.filter(variable=Variable.objects.filter(dataset=dataset)[0])] dimension_list_titles = [d.title for d in Dimension.objects.filter(variable=Variable.objects.filter(dataset=dataset)[0])] dimension_list_units = [d.unit for d in Dimension.objects.filter(variable=Variable.objects.filter(dataset=dataset)[0])] column_list_canonical = variable_list_canonical + dimension_list_canonical column_list_titles = variable_list_titles + dimension_list_titles column_list_units = variable_list_units + dimension_list_units column_list_string = "" for column in column_list_canonical: column_list_string += ", " + column column_list_string = column_list_string[1:] column_list_filter_string = "" for column in column_list_canonical[:5]: column_list_filter_string += "AND " + column + " is not NULL " column_list_filter_string = column_list_filter_string[4:] try: presto_credentials = settings.DATABASES['UBITECH_PRESTO'] conn_presto = prestodb.dbapi.connect( host=presto_credentials['HOST'], port=presto_credentials['PORT'], user=presto_credentials['USER'], catalog=presto_credentials['CATALOG'], schema=presto_credentials['SCHEMA'], ) cursor_presto = conn_presto.cursor() query = "SELECT " + column_list_string + " FROM " + str(dataset.table_name) + " WHERE " + column_list_filter_string + " LIMIT 5" print query cursor_presto.execute(query) rows_to_render = cursor_presto.fetchall() sample_rows = [] for row in rows_to_render: sample_row = [] for x in row: if isinstance(x, unicode): y = x.encode('ascii', 'backslashreplace') else: y = x sample_row.append(y) sample_rows.append(sample_row) print rows_to_render dataset.sample_rows = {"column_titles": column_list_titles, "column_units": column_list_units, "data": sample_rows} dataset.save() except Exception, e: print 'error' print str(e) pass if response.content != '' or profile["storageTable"] in tables_to_add or options['update_old']: try: presto_credentials = settings.DATABASES['UBITECH_PRESTO'] conn_presto = prestodb.dbapi.connect( host=presto_credentials['HOST'], port=presto_credentials['PORT'], user=presto_credentials['USER'], catalog=presto_credentials['CATALOG'], schema=presto_credentials['SCHEMA'], ) cursor_presto = conn_presto.cursor() query = "SELECT COUNT(*) FROM " + str(dataset.table_name) print query cursor_presto.execute(query) number_of_rows = cursor_presto.fetchall()[0][0] print number_of_rows dataset.number_of_rows = number_of_rows dataset.save() except Exception, e: print 'error' print str(e) pass self.stdout.write(self.style.SUCCESS('Successfully collected and updated datasets and metadata'))
from django import forms
# from django.contrib.auth.models import User
# from django.contrib.auth.forms import UserCreationForm
from .models import *
from mptt.forms import TreeNodeMultipleChoiceField, TreeNodeChoiceField
from .widget import CheckboxSelectMultiple


class PropertyForm(forms.ModelForm):
    parent = TreeNodeChoiceField(queryset=Properties.objects.all())

    class Meta:
        model = Properties
        fields = ['name', 'parent']


class AddressForm(forms.ModelForm):

    class Meta:
        model = Address
        fields = ['properties_link']
        # widgets = {"properties_link": FancyTreeWidget(queryset=Properties.objects.order_by('tree_id', 'lft'), model=Properties)}
        widgets = {"properties_link": CheckboxSelectMultiple()}
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import pulumi import pulumi.runtime class ServiceLinkedRole(pulumi.CustomResource): """ Provides an [IAM service-linked role](https://docs.aws.amazon.com/IAM/latest/UserGuide/using-service-linked-roles.html). """ def __init__(__self__, __name__, __opts__=None, aws_service_name=None, custom_suffix=None, description=None): """Create a ServiceLinkedRole resource with the given unique name, props, and options.""" if not __name__: raise TypeError('Missing resource name argument (for URN creation)') if not isinstance(__name__, basestring): raise TypeError('Expected resource name to be a string') if __opts__ and not isinstance(__opts__, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') __props__ = dict() if not aws_service_name: raise TypeError('Missing required property aws_service_name') elif not isinstance(aws_service_name, basestring): raise TypeError('Expected property aws_service_name to be a basestring') __self__.aws_service_name = aws_service_name """ The AWS service to which this role is attached. You use a string similar to a URL but without the `http://` in front. For example: `elasticbeanstalk.amazonaws.com`. To find the full list of services that support service-linked roles, check [the docs](https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html). """ __props__['awsServiceName'] = aws_service_name if custom_suffix and not isinstance(custom_suffix, basestring): raise TypeError('Expected property custom_suffix to be a basestring') __self__.custom_suffix = custom_suffix """ Additional string appended to the role name. Not all AWS services support custom suffixes. """ __props__['customSuffix'] = custom_suffix if description and not isinstance(description, basestring): raise TypeError('Expected property description to be a basestring') __self__.description = description """ The description of the role. """ __props__['description'] = description __self__.arn = pulumi.runtime.UNKNOWN """ The Amazon Resource Name (ARN) specifying the role. """ __self__.create_date = pulumi.runtime.UNKNOWN """ The creation date of the IAM role. """ __self__.name = pulumi.runtime.UNKNOWN """ The name of the role. """ __self__.path = pulumi.runtime.UNKNOWN """ The path of the role. """ __self__.unique_id = pulumi.runtime.UNKNOWN """ The stable and unique string identifying the role. """ super(ServiceLinkedRole, __self__).__init__( 'aws:iam/serviceLinkedRole:ServiceLinkedRole', __name__, __props__, __opts__) def set_outputs(self, outs): if 'arn' in outs: self.arn = outs['arn'] if 'awsServiceName' in outs: self.aws_service_name = outs['awsServiceName'] if 'createDate' in outs: self.create_date = outs['createDate'] if 'customSuffix' in outs: self.custom_suffix = outs['customSuffix'] if 'description' in outs: self.description = outs['description'] if 'name' in outs: self.name = outs['name'] if 'path' in outs: self.path = outs['path'] if 'uniqueId' in outs: self.unique_id = outs['uniqueId']
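# --- Hedged usage sketch (added; not part of the generated file) ---
# ServiceLinkedRole is meant to be instantiated inside a Pulumi program (e.g. the
# stack's __main__.py); the resource name and AWS service below are placeholders.
example_role = ServiceLinkedRole(
    'example-service-linked-role',
    aws_service_name='elasticbeanstalk.amazonaws.com',
)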
# Generated by Django 3.1 on 2021-09-03 19:10

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('users', '0011_profile_call'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='address',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AddField(
            model_name='profile',
            name='country',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AddField(
            model_name='profile',
            name='qth',
            field=models.CharField(blank=True, max_length=200),
        ),
        migrations.AddField(
            model_name='profile',
            name='town',
            field=models.CharField(blank=True, max_length=200),
        ),
    ]
from bleak import BleakClient
import asyncio

# NOTE: start_notify() needs a full 128-bit characteristic UUID; the value below
# appears to be one hex digit short in its final group.
notify_uuid = "beb5483e-36e1-4688-b7f5-ea07361b26a"


def callback(sender, data):
    print(sender, data)


def run(addresses):
    loop = asyncio.get_event_loop()
    tasks = asyncio.gather(
        *(connect_to_device(address, loop) for address in addresses)
    )
    loop.run_until_complete(tasks)


async def connect_to_device(address, loop):
    print("starting", address, "loop")
    async with BleakClient(address, loop=loop, timeout=10.0) as client:
        print("connect to", address)
        try:
            await client.start_notify(notify_uuid, callback)
            await asyncio.sleep(10.0)
            await client.stop_notify(notify_uuid)
        except Exception as e:
            print(address + " : " + str(e))
    print("disconnect from", address)


if __name__ == "__main__":
    run([
        "4c:75:25:c4:c5:3a",
        "50:02:91:90:09:4a",
    ])
import json import time import pyotp import pprint import config import numpy as np import pandas as pd import robin_stocks as rs from decimal import * from base import BaseClass class DiscretionaryWeight(BaseClass): # returns true if not all weights add to 100% def check_discretionary_weights(self, discretionary_weights): # print(round(self.cash_weight, 2)) discretionary_portfolio_weight = Decimal(sum(discretionary_weights.values())) return round(discretionary_portfolio_weight, 2) + round(self.cash_weight, 2) == 1 # returns the total of your discretionary weights - needs to be 1 in order for this to process def sum_discretionary_weights(self, discretionary_weights): discretionary_portfolio_weight = Decimal(sum(discretionary_weights.values())) return (round(discretionary_portfolio_weight, 2) + round(self.cash_weight, 2)) # provides the removed symbols that are not present in your discretionary rebalance def compare_symbols(self, discretionary_weights): symbols = self.symbols_list() removed_symbols = [] for symbol in symbols: if symbol not in discretionary_weights.keys(): removed_symbols.append(symbol) return removed_symbols # closes positions not included in discretionary portfolio def close_positions_not_included(self, removed_symbols): for symbol in removed_symbols: # fetch current quantity quantity = self.quantity_of_shares_per_symbol()[symbol] # sell all order = self.place_sell_order(symbol, quantity, 1) # returns the equity $ to be invested in each symbol - depending on the weight % allocated to it - rounded to nearest penny def target_rebalance_equity_amt(self, discretionary_weights): rebalance_equity = self.portfolio_value_after_cash_reserved() target_equity_allocation = {} for symbol, weight in discretionary_weights.items(): allocated_equity = Decimal(weight) * rebalance_equity target_equity_allocation[symbol] = Decimal(allocated_equity) return target_equity_allocation # returns for each symbol the $ amount required to buy or sell to reach the target decimal % weight allocated to each symbol def equity_allocation_difference(self, discretionary_weights): rebalance_target_equity_amts = self.target_rebalance_equity_amt(discretionary_weights) current_equity_amts = self.equity_of_shares_per_symbol() equity_allocation_difference = {} for symbol, curr_equity_allocation in current_equity_amts.items(): for symbol_, target_equity_allocation in rebalance_target_equity_amts.items(): if symbol == symbol_: # if this is a positive, need to buy $ of share, if negative, sell that amount of shares equity_allocation_difference[symbol] = Decimal((target_equity_allocation - curr_equity_allocation)) return equity_allocation_difference # returns for each symbol the qty amount required to buy or sell to reach the target decimal % weight allocated to each symbol def quantity_allocation_difference(self, discretionary_weights): equity_allocation_difference = self.equity_allocation_difference(discretionary_weights) symbol_price_quote = self.price_quote() quantity_allocation_difference = {} for symbol, allocation_difference in equity_allocation_difference.items(): for symbol_, price_quote in symbol_price_quote.items(): if symbol == symbol_: # if this is a positive, need to buy $ of share, if negative, sell that amount of shares quantity_allocation_difference[symbol] = round(allocation_difference / price_quote, 6) return quantity_allocation_difference # rebalance handler def rebalance(self, discretionary_weights, min_drift_amt=1.5, min_symbol_equity_amt=5): if self.is_market_open(): print('Market is Open!') if 
not self.check_discretionary_weights(discretionary_weights): print(f'Sorry your discretionary weights do not add up to 100%, they add up to {self.sum_discretionary_weights(discretionary_weights) * 100}') else: if min_drift_amt < 1.5: print(f'{min_drift_amt} is below the Min Threshold $ Amount Per Trade of $1.50') print('Min $ Amount Per Trade Set to Mandatory Minimum Default of $1.50') min_drift_amt = 1.5 print('Rebalance Initiated - Checking User and Portfolio Eligibility for Fractional Trading...') if self.user_eligible_for_fractionals() and self.symbols_eligible_for_fractionals(): print('User and Portfolio Approved – Starting Rebalance...') removed_symbols = self.compare_symbols(discretionary_weights) print('The Following Are The Removed Symbols That Fell Below Threshold Equity Holding Amount of ${}:'.format(min_symbol_equity_amt), *removed_symbols, sep='\n- ') print('Closing Removed Symbols Positions Now...') closing_positions = self.close_positions_not_included(removed_symbols) print('Calculating Rebalance Purchase and Sale Quantities For Each Symbol...') quantity_allocation = self.quantity_allocation_difference(discretionary_weights) print('Sorting Symbols for Rebalance Process...') # returns an array of tuples - used so that you sell first - keeps investable cash reserves ready to deploy for purchasing sorted_sell_to_buy = sorted(quantity_allocation.items(), key=lambda x: x[1]) for symbol, quantity_difference in sorted_sell_to_buy: if quantity_difference >= 0: if self.min_portfolio_drift(symbol, quantity_difference, min_drift_amt): self.place_buy_order(symbol, quantity_difference, 1) elif quantity_difference < 0: if self.min_portfolio_drift(symbol, -quantity_difference, min_drift_amt): self.place_sell_order(symbol, -quantity_difference, 1) else: return print('Rebalance Completed!') else: if not self.user_eligible_for_fractionals(): print('Sorry Your Account is Not Eligible For Fractional Trading') else: print('Sorry a Symbol(s) in your Portfolio Are Not Eligible For Fractionals') else: print('Market is Closed or Within 30 Mins of Closing - Try During Next Open Market Hours')
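# --- Hedged usage sketch (added; symbols, weights, and constructor arguments are placeholders) ---
# rebalance() expects a dict of symbol -> target portfolio weight; together with
# the inherited cash_weight these must sum to 1 or the run is refused.
target_weights = {'VTI': Decimal('0.60'), 'BND': Decimal('0.35')}  # assumes cash_weight == 0.05

# The BaseClass constructor (credentials, cash reserve, etc.) is not shown here,
# so instantiation is left as a comment:
# portfolio = DiscretionaryWeight(...)
# portfolio.rebalance(target_weights, min_drift_amt=1.5, min_symbol_equity_amt=5)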
import cv2 import numpy as np import dlib class Seg2FaceDataMaker(object): def __init__(self, lm_detector_param, out_size, margin=1.0): self._out_size = out_size self._margin = margin self._f_detector = dlib.get_frontal_face_detector() self._lm_detector = dlib.shape_predictor(lm_detector_param) #def make(self, input_image, bbox): # face_img = self._extract_face(input_image, bbox) # seg = self._fill_seg(face_img) # seg_splitted = self._split_seg(seg) # return seg_splitted def make(self, input_image): resized_input = cv2.resize( input_image, (self._out_size[0], self._out_size[1])) seg = self._fill_seg(input_image) seg = cv2.resize(seg, (self._out_size[0], self._out_size[1]), interpolation=cv2.INTER_NEAREST) #seg = (seg.astype(np.float32) / 4.0) - 1. return resized_input, seg def _extract_face(self, input_image, bbox): side = int(max(bbox[2], bbox[3]) * self._margin) border = int(side/2) if input_image.shape[2] == 1: black = [0] else: black = [0, 0, 0] bg = cv2.copyMakeBorder(input_image, border, border, border, border, cv2.BORDER_CONSTANT, value=black) center = (int(bbox[0] + border + bbox[2]/2), int(bbox[1] + border + bbox[3]/2)) cropped = bg[bbox[1]:bbox[1]+side, bbox[0]:bbox[0]+side] resized = cv2.resize(cropped, (self._out_size)) return resized #def make(self, input_image, bbox): # if len(input_image.shape) == 4: # return [self._make(input_image[i], bbox[i]) for i in range(input_image.shape[0])] # else: # return self._make(input_image, bbox) #def _fill_seg(self, input_image): # gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY) # lm = self._lm_detector(gray, [0, 0] + input_image.shape[0:2]) # np_lm = [(lm.part[i].x, lm.part[i].y) for i in range(lm.num_parts)] # seg = np.zeros_like(gray) # seg = cv2.fillPoly(seg, pts=np_lm[0:17], color=1) # seg = cv2.fillPoly(seg, pts=np_lm[36:42], color=2) # seg = cv2.fillPoly(seg, pts=np_lm[42:48], color=3) # seg = cv2.fillPoly(seg, pts=np_lm[17:22], color=4) # seg = cv2.fillPoly(seg, pts=np_lm[22:27], color=5) # seg = cv2.fillPoly(seg, pts=(np_lm[27], np_lim[31], np_lim[35]), color=6) # seg = cv2.fillPoly(seg, pts=np_lm[48:60], color=7) # seg = cv2.fillPoly(seg, pts=np_lm[60:68], color=1) # return seg def _fill_seg(self, input_image): gray = cv2.cvtColor(input_image, cv2.COLOR_BGR2GRAY) #rect = dlib.rectangle(bbox[0], bbox[1], bbox[0]+bbox[2], bbox[1]+bbox[3]) rect = self._f_detector(gray) if len(rect) > 0: lm = self._lm_detector(gray, rect[0]) np_lm = [(lm.part(i).x, lm.part(i).y) for i in range(lm.num_parts)] seg = np.zeros_like(gray, np.uint8) seg = cv2.fillPoly(seg, pts=np.array([np_lm[0:17]], np.int32), color=1) seg = cv2.fillPoly(seg, pts=np.array([np_lm[36:42]], np.int32), color=2) seg = cv2.fillPoly(seg, pts=np.array([np_lm[42:48]], np.int32), color=3) seg = cv2.fillPoly(seg, pts=np.array([np_lm[17:22]], np.int32), color=4) seg = cv2.fillPoly(seg, pts=np.array([np_lm[22:27]], np.int32), color=5) seg = cv2.fillPoly(seg, pts=np.array([[np_lm[27], np_lm[31], np_lm[35]]], np.int32), color=6) seg = cv2.fillPoly(seg, pts=np.array([np_lm[48:60]], np.int32), color=7) seg = cv2.fillPoly(seg, pts=np.array([np_lm[60:68]], np.int32), color=8) return seg else: return None def _split_seg(self, seg): shape = seg.shape shape[-1] = 8 new_seg = np.zeros(shape) for i in range(1,8): new_seg[i] = seg==i return new_seg
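# --- Hedged usage sketch (added; the file paths below are placeholders, not from the original code) ---
# Builds the data maker with the standard dlib 68-point landmark model and runs it
# on one image. make() returns the resized input plus a label map whose values 1..8
# mark facial regions; it will raise if no face is detected, since _fill_seg
# returns None in that case.
if __name__ == '__main__':
    maker = Seg2FaceDataMaker('shape_predictor_68_face_landmarks.dat', out_size=(256, 256))
    image = cv2.imread('face.jpg')
    resized_input, seg = maker.make(image)
    print(resized_input.shape, seg.shape)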
ref = {"A": 10, "B": 11, "C": 12, "D": 13, "E": 14, "F": 15} def hex_to_decimal_integer(number): if "." in number: number = number.split('.')[0] result = 0 for index, multiplier in enumerate(range(len(number) - 1, -1, -1)): value = number[index] if value in ref.keys(): value = ref[value] else: value = int(value) result += value * (16 ** multiplier) return result # I don't know why but this have a little error margin def hex_to_decimal_fraction(number): if "." in number: number = number.split(".")[1] else: number = "0" result = 0 for index, value in enumerate(number): if value in ref.keys(): value = ref[value] else: value = int(value) result += int(value) * (16 ** (-1 * (index + 1))) return result def hex_to_decimal(number): integer_part = hex_to_decimal_integer(number) decimal_part = hex_to_decimal_fraction(number) return integer_part + decimal_part def main(): number = input("Hex: ") result = hex_to_decimal(number) print("Number:", result) if __name__ == '__main__': main()
import os, glob, logging import cPickle as pickle from functools import partial import random import math import multiprocessing as mp import multiprocessing.sharedctypes as mps from contextlib import closing import numpy as n from scipy.special import erf import scipy.stats.mstats as mstats import rtpipe.parsems as pm import rtpipe.parsecal as pc import rtpipe.parsesdm as ps from rtpipe.version import __version__ import rtlib_cython as rtlib import pyfftw try: import casautil except ImportError: import pwkit.environments.casa.util as casautil # setup CASA and logging qa = casautil.tools.quanta() logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging.captureWarnings(True) logger = logging.getLogger('rtpipe') def pipeline(d, segments): """ Transient search pipeline running on single node. Processes one or more segments of data (in which a single bgsub, (u,v,w), etc. can be used). Can search completely, independently, and saves candidates. If segments is a list of segments, then it will parallelize read/search processes. Stages: 0) Take dictionary that defines metadata and search params -- This defines state of pipeline, including times, uv extent, pipeline search parameters, etc. 1) Read data -- Overlapping reads needed to maintain sensitivity to all DMs at all times 2) Prepare data -- Reads/applies telcal/CASA solutions, flags, bg time subtraction 3) Search using all threads -- Option for plug-and-play detection algorithm and multiple filters 4) Save candidate and noise info, if requested """ if type(segments) == int: segments = [segments] logger.info('Starting search of %s, scan %d, segments %s' % (d['filename'], d['scan'], str(segments))) assert os.path.exists(d['gainfile']), 'Calibration file autodetection failed for gainfile {0}'.format(d['gainfile']) # seed the pseudo-random number generator # TJWL random.seed() # set up shared arrays to fill data_read_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2); data_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2) u_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']); u_mem = mps.Array(mps.ctypes.c_float, d['nbl']) v_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']); v_mem = mps.Array(mps.ctypes.c_float, d['nbl']) w_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']); w_mem = mps.Array(mps.ctypes.c_float, d['nbl']) # need these if debugging data = numpyview(data_mem, 'complex64', datashape(d)) # optional data_read = numpyview(data_read_mem, 'complex64', datashape(d)) # optional u = numpyview(u_mem, 'float32', d['nbl'], raw=False) v = numpyview(v_mem, 'float32', d['nbl'], raw=False) w = numpyview(w_mem, 'float32', d['nbl'], raw=False) # plan fft logger.debug('Planning FFT...') arr = pyfftw.empty_aligned((d['npixx'], d['npixy']), dtype='complex64', n=16) arr[:] = n.random.randn(*arr.shape) + 1j*n.random.randn(*arr.shape) fft_arr = pyfftw.interfaces.numpy_fft.ifft2(arr) results = {} # only one needed for parallel read/process. more would overwrite memory space with closing(mp.Pool(1, initializer=initread, initargs=(data_read_mem, u_read_mem, v_read_mem, w_read_mem, data_mem, u_mem, v_mem, w_mem))) as readpool: try: # submit all segments to pool of 1. locking data should keep this from running away. for segment in segments: assert segment in range(d['nsegments']), 'Segment %d not in range of %d nsegments' % (segment, d['nsegments']) candsfile = getcandsfile(d, segment) if d['savecands'] and os.path.exists(candsfile): logger.error('candsfile %s already exists. Ending processing...' 
% candsfile) else: results[segment] = readpool.apply_async(pipeline_dataprep, (d, segment)) # no need for segment here? need to think through structure... # step through pool of jobs and pull data off as ready. this allows pool to continue to next segment. while results.keys(): for segment in results.keys(): if results[segment].ready(): job = results.pop(segment) d = job.get() else: continue with data_mem.get_lock(): cands = search(d, data_mem, u_mem, v_mem, w_mem) # save candidate info if d['savecands']: logger.info('Saving %d candidates for segment %d...' % (len(cands), segment)) savecands(d, cands) except KeyboardInterrupt: logger.error('Caught Ctrl-C. Closing processing pool.') readpool.terminate() readpool.join() raise def pipeline_dataprep(d, segment): """ Single-threaded pipeline for data prep that can be started in a pool. """ logger.debug('dataprep starting for segment %d' % segment) # dataprep reads for a single segment, so d['segment'] defined here d['segment'] = segment # set up numpy arrays, as expected by dataprep functions data_read = numpyview(data_read_mem, 'complex64', datashape(d), raw=False); data = numpyview(data_mem, 'complex64', datashape(d), raw=False) u_read = numpyview(u_read_mem, 'float32', d['nbl'], raw=False); u = numpyview(u_mem, 'float32', d['nbl'], raw=False) v_read = numpyview(v_read_mem, 'float32', d['nbl'], raw=False); v = numpyview(v_mem, 'float32', d['nbl'], raw=False) w_read = numpyview(w_read_mem, 'float32', d['nbl'], raw=False); w = numpyview(w_mem, 'float32', d['nbl'], raw=False) #### #### #### #### # 1) Read data #### #### #### #### with data_read_mem.get_lock(): if d['dataformat'] == 'ms': # CASA-based read segread = pm.readsegment(d, segment) data_read[:] = segread[0] (u_read[:], v_read[:], w_read[:]) = (segread[1][d['readints']/2], segread[2][d['readints']/2], segread[3][d['readints']/2]) # mid int good enough for segment. could extend this to save per chunk del segread elif d['dataformat'] == 'sdm': data_read[:] = ps.read_bdf_segment(d, segment) (u_read[:], v_read[:], w_read[:]) = ps.get_uvw_segment(d, segment) #### #### #### #### # 2) Prepare data #### #### #### #### # calibrate data if os.path.exists(d['gainfile']): try: radec = (); spwind = []; calname = '' # set defaults if '.GN' in d['gainfile']: # if telcal file if d.has_key('calname'): calname = d['calname'] sols = pc.telcal_sol(d['gainfile']) # parse gainfile else: # if CASA table if d.has_key('calradec'): radec = d['calradec'] # optionally defined cal location spwind = d['spw'] sols = pc.casa_sol(d['gainfile'], flagants=d['flagantsol']) # parse gainfile sols.parsebp(d['bpfile']) # parse bpfile # if gainfile parsed ok, choose best solution for data sols.set_selection(d['segmenttimes'][segment].mean(), d['freq']*1e9, rtlib.calc_blarr(d), calname=calname, pols=d['pols'], radec=radec, spwind=spwind) sols.apply(data_read) except: logger.warning('Could not parse or apply gainfile %s.' % d['gainfile']) raise else: logger.warn('Calibration file not found. 
Proceeding with no calibration applied.') # flag data if len(d['flaglist']): logger.info('Flagging with flaglist: %s' % d['flaglist']) dataflag(d, data_read) else: logger.warn('No real-time flagging.') # mean t vis subtration if d['timesub'] == 'mean': logger.info('Subtracting mean visibility in time...') rtlib.meantsub(data_read, [0, d['nbl']]) else: logger.warn('No mean time subtraction.') # save noise pickle if d['savenoise']: noisepickle(d, data_read, u_read, v_read, w_read, chunk=200) # phase to new location if l1,m1 set and nonzero value try: if any([d['l1'], d['m1']]): logger.info('Rephasing data to (l, m)=(%.4f, %.4f).' % (d['l1'], d['m1'])) rtlib.phaseshift_threaded(data_read, d, d['l1'], d['m1'], u_read, v_read) d['l0'] = d['l1'] d['m0'] = d['m1'] else: logger.debug('Not rephasing.') except KeyError: pass if d['mock']: # could be list or int # assume that std of vis in the middle of the segment is # characteristic of noise throughout the segment falsecands = {} datamid = n.ma.masked_equal(data_read[d['readints']/2].real, 0, copy=True) madstd = 1.4826 * n.ma.median(n.abs(datamid - n.ma.median(datamid)))/n.sqrt(d['npol']*d['nbl']*d['nchan']) std = datamid.std()/n.sqrt(d['npol']*d['nbl']*d['nchan']) logger.debug('Noise per vis in central int: madstd {}, std {}'.format(madstd, std)) dt = 1 # pulse width in integrations if isinstance(d['mock'], int): for i in n.random.randint(d['datadelay'][-1], d['readints'], d['mock']): # add nmock transients at random ints (loff, moff, A, DM) = make_transient(madstd, max(d['dmarr']), Amin=1.2*d['sigma_image1']) candid = (int(segment), int(i), DM, int(dt), int(0)) falsecands[candid] = [A/madstd, A, loff, moff] elif isinstance(d['mock'], list): for mock in d['mock']: try: (i, DM, loff, moff, SNR) = mock candid = (int(segment), int(i), DM, int(dt), int(0)) falsecands[candid] = [SNR, SNR*madstd, loff, moff] except: logger.warn('Could not parse mock parameters: {}'.format(mock)) else: logger.warn('Not a recognized type for mock.') for candid in falsecands: (segment, i, DM, dt, beamnum) = candid (SNR, A, loff, moff) = falsecands[candid] logger.info('Adding mock transient at int %d, DM %.1f, (l, m) = (%f, %f) at est SNR %.1f' % (i, DM, loff, moff, SNR)) add_transient(d, data_read, u_read, v_read, w_read, loff, moff, i, A, DM, dt) if d['savecands']: savecands(d, falsecands, domock=True) with data_mem.get_lock(): data[:] = data_read[:] u[:] = u_read[:]; v[:] = v_read[:]; w[:] = w_read[:] logger.debug('All data unlocked for segment %d' % segment) # d now has segment keyword defined return d def pipeline_reproduce(d, candloc=[], segment=None, lm=None, product='data'): """ Reproduce data and/or candidates with given candloc or lm coordinate. d and segment can be given, if only reading data. candloc is length 5 or 6 with ([scan], segment, candint, dmind, dtind, beamnum). product can be 'data', 'dataph', 'imdata', 'datacorr'. lm is tuple of (l,m) coordinates in radians. 
""" # set up shared arrays to fill data_reproduce_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2) data_read_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2) data_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2) u_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']) u_mem = mps.Array(mps.ctypes.c_float, d['nbl']) v_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']) v_mem = mps.Array(mps.ctypes.c_float, d['nbl']) w_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']) w_mem = mps.Array(mps.ctypes.c_float, d['nbl']) # get numpy views of memory spaces data = numpyview(data_mem, 'complex64', datashape(d)) # optional data_read = numpyview(data_read_mem, 'complex64', datashape(d)) # optional u = numpyview(u_mem, 'float32', d['nbl'], raw=False) v = numpyview(v_mem, 'float32', d['nbl'], raw=False) w = numpyview(w_mem, 'float32', d['nbl'], raw=False) # set up state dict for merge pkl if len(candloc) == 6: scan, segment, candint, dmind, dtind, beamnum = candloc # this is now defined by call to rtpipe.set_pipeline in parsecands.plot_cand # d['scan'] = scan # d['starttime_mjd'] = d['starttime_mjddict'][scan] # d['nsegments'] = len(d['segmenttimesdict'][scan]) # d['segmenttimes'] = d['segmenttimesdict'][scan] elif len(candloc) == 5: # if not a merge pkl, then d['scan'] is correct segment, candint, dmind, dtind, beamnum = candloc elif isinstance(segment, int): assert product == 'data', 'If only providing segment, then only data product can be produced.' else: logger.error('candloc must be length 5 or 6 or segment provided.') return with closing(mp.Pool(1, initializer=initread, initargs=(data_read_mem, u_read_mem, v_read_mem, w_read_mem, data_mem, u_mem, v_mem, w_mem))) as readpool: readpool.apply(pipeline_dataprep, (d, segment)) if product == 'data': logger.info('Returning prepared data...') return data elif product == 'dataph': logger.info('Reproducing data and phasing...') assert lm, 'lm must be tuple with (l, m) coords in radians.' data = runreproduce(d, data_mem, data_reproduce_mem, u, v, w, dmind, dtind, lm=lm) return data elif product == 'datacorr': logger.info('Reproducing data...') data = runreproduce(d, data_mem, data_reproduce_mem, u, v, w, dmind, dtind) return data elif product == 'imdata': logger.info('Reproducing candidate...') im, data = runreproduce(d, data_mem, data_reproduce_mem, u, v, w, dmind, dtind, candint=candint) return im, data else: logger.error('product must be data, dataph, or imdata.') def meantsubpool(d, data_read): """ Wrapper for mean visibility subtraction in time. Doesn't work when called from pipeline using multiprocessing pool. """ logger.info('Subtracting mean visibility in time...') data_read = numpyview(data_read_mem, 'complex64', datashape(d)) tsubpart = partial(rtlib.meantsub, data_read) blranges = [(d['nbl'] * t/d['nthread'], d['nbl']*(t+1)/d['nthread']) for t in range(d['nthread'])] with closing(mp.Pool(1, initializer=initreadonly, initargs=(data_read_mem,))) as tsubpool: tsubpool.map(tsubpart, blr) def dataflag(d, data_read): """ Flagging data in single process """ for flag in d['flaglist']: mode, sig, conv = flag # resultlist = [] # with closing(mp.Pool(4, initializer=initreadonly, initargs=(data_read_mem,))) as flagpool: for ss in d['spw']: chans = n.arange(d['spw_chanr_select'][ss][0], d['spw_chanr_select'][ss][1]) for pol in range(d['npol']): status = rtlib.dataflag(data_read, chans, pol, d, sig, mode, conv) logger.info(status) # hack to get rid of bad spw/pol combos whacked by rfi if 'badspwpol' in d: logger.info('Comparing overall power between spw/pol. 
Removing those with %d times typical value' % d['badspwpol']) spwpol = {} for spw in d['spw']: chans = n.arange(d['spw_chanr_select'][spw][0], d['spw_chanr_select'][spw][1]) for pol in range(d['npol']): spwpol[(spw, pol)] = n.abs(data_read[:,:,chans,pol]).std() meanstd = n.mean(spwpol.values()) for (spw,pol) in spwpol: if spwpol[(spw, pol)] > d['badspwpol']*meanstd: logger.info('Flagging all of (spw %d, pol %d) for excess noise.' % (spw, pol)) chans = n.arange(d['spw_chanr_select'][spw][0], d['spw_chanr_select'][spw][1]) data_read[:,:,chans,pol] = 0j def dataflagatom(chans, pol, d, sig, mode, conv): """ Wrapper function to get shared memory as numpy array into pool Assumes data_mem is global mps.Array """ data = numpyview(data_mem, 'complex64', datashape(d)) # data = n.ma.masked_array(data, data==0j) # this causes massive overflagging on 14sep03 data return rtlib.dataflag(data, chans, pol, d, sig, mode, conv) def search(d, data_mem, u_mem, v_mem, w_mem): """ Search function. Queues all trials with multiprocessing. Assumes shared memory system with single uvw grid for all images. """ data = numpyview(data_mem, 'complex64', datashape(d)) u = numpyview(u_mem, 'float32', d['nbl']) v = numpyview(v_mem, 'float32', d['nbl']) w = numpyview(w_mem, 'float32', d['nbl']) data_resamp_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2) data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) logger.debug('Search of segment %d' % d['segment']) beamnum = 0 # not yet implemented cands = {} candsfile = getcandsfile(d) if d['savecands'] and os.path.exists(candsfile): logger.warn('candsfile %s already exists' % candsfile) return cands # make wterm kernels if d['searchtype'] == 'image2w': wres = 100 npix = max(d['npixx_full'], d['npixy_full']) bls, uvkers = rtlib.genuvkernels(w, wres, npix, d['uvres'], thresh=0.05) # SUBMITTING THE LOOPS if n.any(data): logger.debug('Searching in %d chunks with %d threads' % (d['nchunk'], d['nthread'])) logger.info('Dedispering to max (DM, dt) of (%d, %d) ...' 
% (d['dmarr'][-1], d['dtarr'][-1]) ) # open pool with closing(mp.Pool(d['nthread'], initializer=initresamp, initargs=(data_mem, data_resamp_mem))) as resamppool: blranges = [(d['nbl'] * t/d['nthread'], d['nbl']*(t+1)/d['nthread']) for t in range(d['nthread'])] for dmind in xrange(len(d['dmarr'])): dm = d['dmarr'][dmind] logger.debug('Dedispersing for %d' % dm,) dedisppart = partial(correct_dm, d, dm) # moves in fresh data dedispresults = resamppool.map(dedisppart, blranges) dtlast = 1 for dtind in xrange(len(d['dtarr'])): dt = d['dtarr'][dtind] if dt > 1: # dedispersion in shared memory, mapped over baselines # set partial functions for pool.map logger.debug('Resampling for %d' % dt,) resample = dt/dtlast resamppart = partial(correct_dt, d, resample) # corrects in place resampresults = resamppool.map(resamppart, blranges) dtlast = dt # set dm- and dt-dependent int ranges for segment nskip_dm = ((d['datadelay'][-1] - d['datadelay'][dmind]) / dt) * (d['segment'] != 0) # nskip=0 for first segment searchints = (d['readints'] - d['datadelay'][dmind]) / dt - nskip_dm logger.debug('Imaging %d ints from %d for (%d,%d)' % (searchints, nskip_dm, dm, dt),) # imaging in shared memory, mapped over ints image1part = partial(image1, d, u, v, w, dmind, dtind, beamnum) nchunkdt = min(searchints, max(d['nthread'], d['nchunk']/dt)) # parallelize in range bounded by (searchints, nthread) irange = [(nskip_dm + searchints*chunk/nchunkdt, nskip_dm + searchints*(chunk+1)/nchunkdt) for chunk in range(nchunkdt)] imageresults = resamppool.map(image1part, irange) # COLLECTING THE RESULTS per dm/dt. Clears the way for overwriting data_resamp for imageresult in imageresults: for kk in imageresult.keys(): cands[kk] = imageresult[kk] if 'sigma_plot' in d: from rtpipe.reproduce import make_cand_plot as makecp if 'snr2' in d['features']: snrcol = d['features'].index('snr2') elif 'snr1' in d['features']: snrcol = d['features'].index('snr1') snrs = n.array([value[snrcol] for value in cands.itervalues()]) maxsnr = max([0] + [value[snrcol] for value in cands.itervalues()]) # be sure max includes at least one value if maxsnr > d['sigma_plot']: segment, candint, dmind, dtind, beamnum = [key for key, value in cands.iteritems() if value[snrcol] == maxsnr][0] logger.info('Making cand plot for scan %d, segment %d, candint %d, dmind %d, dtint %d with SNR %.1f.' % (d['scan'], segment, candint, dmind, dtind, maxsnr)) im, data = runreproduce(d, data_mem, data_resamp_mem, u, v, w, dmind, dtind, candint) loclabel = [d['scan'], segment, candint, dmind, dtind, beamnum] makecp(d, im, data, loclabel, version=2, snrs=snrs) else: logger.info('No candidate in segment %d above sigma_plot %.1f' % (d['segment'], d['sigma_plot'])) else: logger.warn('Data for processing is zeros. Moving on...') logger.info('Found %d cands in scan %d segment %d of %s. ' % (len(cands), d['scan'], d['segment'], d['filename'])) return cands def runreproduce(d, data_mem, data_resamp_mem, u, v, w, dmind, dtind, candint=-1, lm=None, twindow=30): """ Reproduce function, much like search. Returns image and rephased data for given candint. If no candint is given, it returns resampled data by default. Optionally rephases to lm=(l, m) coordinates. """ data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) with closing(mp.Pool(1, initializer=initresamp, initargs=(data_mem, data_resamp_mem))) as repropool: # dedisperse logger.info('Dedispersing with DM=%.1f, dt=%d...' 
% (d['dmarr'][dmind], d['dtarr'][dtind])) repropool.apply(correct_dmdt, [d, dmind, dtind, (0,d['nbl'])]) # set up image if 'image1' in d['searchtype']: npixx = d['npixx'] npixy = d['npixy'] elif 'image2' in d['searchtype']: npixx = d['npixx_full'] npixy = d['npixy_full'] if candint > -1: if lm: logger.warn('Using candint image to get l,m. Not using provided l,m.') # image logger.info('Imaging int %d with %d %d pixels...' % (candint, npixx, npixy)) im = repropool.apply(image1wrap, [d, u, v, w, npixx, npixy, candint/d['dtarr'][dtind]]) snrmin = im.min()/im.std() snrmax = im.max()/im.std() logger.info('Made image with SNR min, max: %.1f, %.1f' % (snrmin, snrmax)) if snrmax > -1*snrmin: l1, m1 = calc_lm(d, im, minmax='max') else: l1, m1 = calc_lm(d, im, minmax='min') # rephase and trim interesting ints out repropool.apply(move_phasecenter, [d, l1, m1, u, v]) minint = max(candint/d['dtarr'][dtind]-twindow/2, 0) maxint = min(candint/d['dtarr'][dtind]+twindow/2, len(data_resamp)/d['dtarr'][dtind]) return(im, data_resamp[minint:maxint].mean(axis=1)) else: if lm: l1, m1 = lm repropool.apply(move_phasecenter, [d, l1, m1, u, v]) return data_resamp def add_transient(d, data, u, v, w, l1, m1, i, s, dm=0, dt=1): """ Add a transient to data. l1, m1 are relative direction cosines (location) of transient added at integration i (at highest freq) with brightness s (per int/chan/bl/pol in data units) dm/dt are dispersion (in pc/cm3) and pulse width (in s). """ ang = lambda ch: l1 * u * d['freq'][ch]/d['freq_orig'][0] + m1 * v * d['freq'][ch]/d['freq_orig'][0] delay = lambda ch: n.round(4.1488e-3 * dm * (d['freq'][ch]**(-2) - d['freq'][-1]**(-2))/d['inttime'], 0).astype(int) #snr_ideal = s/(data[i].real.std()/n.sqrt(d['npol']*d['nbl']*d['nchan'])) #logger.info('SNR of source with system brightness %.1f = %d (idealized; ok at low SNR)' % (s, int(snr_ideal))) for ch in range(d['nchan']): data[i+delay(ch):i+delay(ch)+dt, :, ch] += s * n.exp(2j*n.pi*ang(ch)[None,:,None]) def make_transient(std, DMmax, Amin=6., Amax=20., rmax=20., rmin=0., DMmin=0.): """ Produce a mock transient pulse source for the purposes of characterizing the detection success of the current pipeline. Assumes - Code to inject the transients does so by inserting at an array index - Noise level at the center of the data array is characteristic of the noise level throughout Input std - noise level in visibilities(?) 
at mid-point of segment DMmax - maximum DM at which mock transient can be inserted [pc/cm^3] Amin/Amax is amplitude in units of the std (calculated below) rmax/rmin is radius range in arcmin DMmin is min DM Returns loff - direction cosine offset of mock transient from phase center [radians] moff - direction cosine offset of mock transient from phase center [radians] A - amplitude of transient [std units] DM - dispersion measure of mock transient [pc/cm^3] """ rad_arcmin = math.pi/(180*60) phimin = 0.0 phimax = 2*math.pi # Amplitude of transient, done in units of the std # std is calculated assuming that noise level in the middle of the data, # at index d['readints']/2, is characteristic of that throughout the data A = random.uniform(Amin, Amax) * std # Position of transient, in direction cosines r = random.uniform(rmin, rmax) phi = random.uniform(phimin, phimax) loff = r*math.cos(phi) * rad_arcmin moff = r*math.sin(phi) * rad_arcmin # Dispersion measure DM = random.uniform(DMmin, DMmax) return loff, moff, A, DM def pipeline_refine(d0, candloc, scaledm=2.1, scalepix=2, scaleuv=1.0, chans=[], returndata=False): """ Reproduces candidate and potentially improves sensitivity through better DM and imaging parameters. scale* parameters enhance sensitivity by making refining dmgrid and images. Other options include: d0['selectpol'] = ['RR'] d0['flaglist'] = [('blstd', 2.5, 0.05)] """ import rtpipe.parseparams as pp assert len(candloc) == 6, 'candloc should be (scan, segment, candint, dmind, dtind, beamnum).' scan, segment, candint, dmind, dtind, beamnum = candloc d1 = d0.copy() # dont mess with original (mutable!) segmenttimes = d1['segmenttimesdict'][scan] # if file not at stated full path, assume it is local if not os.path.exists(d1['filename']): workdir = os.getcwd() filename = os.path.join(workdir, os.path.basename(d1['filename'])) else: filename = d1['filename'] # clean up d1 of superfluous keys params = pp.Params() # will be used as input to rt.set_pipeline for key in d1.keys(): if not hasattr(params, key): _ = d1.pop(key) d1['npix'] = 0; d1['uvres'] = 0 d1['savecands'] = False d1['savenoise'] = False d1['logfile'] = False # redefine d. 
many parameters modified after this to keep from messing up time boundaries/cand location d = set_pipeline(filename, scan, **d1) if chans: d['chans'] = chans d['segmenttimes'] = segmenttimes d['nsegments'] = len(segmenttimes) data_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2) u_mem = mps.Array(mps.ctypes.c_float, d['nbl']) v_mem = mps.Array(mps.ctypes.c_float, d['nbl']) w_mem = mps.Array(mps.ctypes.c_float, d['nbl']) data = numpyview(data_mem, 'complex64', datashape(d)) u = numpyview(u_mem, 'float32', d['nbl']) v = numpyview(v_mem, 'float32', d['nbl']) w = numpyview(w_mem, 'float32', d['nbl']) # fill data, uvw data[:] = pipeline_reproduce(d, segment=segment, product='data') d['segment'] = segment u[:], v[:], w[:] = ps.get_uvw_segment(d, segment) # refine parameters dmcand = d['dmarr'][dmind] if scaledm > 1.: try: dmdelta = d['dmarr'][dmind+1] - d['dmarr'][dmind] except IndexError: try: dmdelta = d['dmarr'][dmind] - d['dmarr'][dmind-1] except IndexError: dmdelta = 0.1*dmcand d['dmarr'] = list(n.arange(dmcand-dmdelta, dmcand+dmdelta, dmdelta/scaledm)) elif scaledm == 1.: d['dmarr'] = [dmcand] d['datadelay'] = [rtlib.calc_delay(d['freq'], d['inttime'],dm).max() for dm in d['dmarr']] + [d['datadelay'][-1]] d['dtarr'] = [d['dtarr'][dtind]] d['npixx'] = scalepix*d['npixx'] d['npixy'] = scalepix*d['npixy'] d['uvres'] = scaleuv*d['uvres'] # search logger.info('Refining DM grid to %s and expanding images to (%d, %d) pix with uvres %d' % (str(d['dmarr']), d['npixx'], d['npixy'], d['uvres'])) cands = search(d, data_mem, u_mem, v_mem, w_mem) cands = {tuple([scan]+list(loc)):list(prop) for (loc, prop) in cands.iteritems()} d['featureind'].insert(0, 'scan') # making cand plot from this # need to keep from confusing old and new indices # im, data = rt.pipeline_reproduce(d, loc[candnum], product='imdata') # scan, segment, candint, dmind, dtind, beamnum = loc # loclabel = scan, segment, candint, dmind, dtind, beamnum # make_cand_plot(d, im, data, loclabel, outname=outname) # return info to reproduce/visualize refined cands if returndata: return data else: return d, cands def pipeline_lightcurve(d, l1=0, m1=0, segments=[], scan=-1): """ Makes lightcurve at given (l1, m1) l1, m1 define phase center. if not set, then image max is used. 
""" if scan == -1: scan = d['scan'] if segments == []: segments = range(d['nsegments']) d = set_pipeline(d['filename'], scan, fileroot=d['fileroot'], dmarr=[0], dtarr=[1], savenoise=False, timesub='', logfile=False, nsegments=d['nsegments']) # define memory and numpy arrays data_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2) data_read_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2) data_resamp_mem = mps.Array(mps.ctypes.c_float, datasize(d)*2) u_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']) u_mem = mps.Array(mps.ctypes.c_float, d['nbl']) v_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']) v_mem = mps.Array(mps.ctypes.c_float, d['nbl']) w_read_mem = mps.Array(mps.ctypes.c_float, d['nbl']) w_mem = mps.Array(mps.ctypes.c_float, d['nbl']) data_read = numpyview(data_read_mem, 'complex64', datashape(d)) # optional u_read = numpyview(u_read_mem, 'float32', d['nbl'], raw=False) v_read = numpyview(v_read_mem, 'float32', d['nbl'], raw=False) w_read = numpyview(w_read_mem, 'float32', d['nbl'], raw=False) lightcurve = n.zeros(shape=(d['nints'], d['nchan'], d['npol']), dtype='complex64') phasecenters = [] with closing(mp.Pool(1, initializer=initread, initargs=(data_read_mem, u_read_mem, v_read_mem, w_read_mem, data_mem, u_mem, v_mem, w_mem))) as readpool: for segment in segments: logger.info('Reading data...') readpool.apply(pipeline_dataprep, (d, segment)) # get image peak for rephasing if not any([l1, m1]): im = sample_image(d, data_read, u_read, v_read, w_read, i=-1, verbose=1, imager='xy') l2, m2 = calc_lm(d, im) else: l2 = l1 m2 = m1 logger.info('Rephasing data to (l, m)=(%.4f, %.4f).' % (l2, m2)) rtlib.phaseshift_threaded(data_read, d, l2, m2, u_read, v_read) phasecenters.append( (l2,m2) ) nskip = (24*3600*(d['segmenttimes'][segment,0] - d['starttime_mjd'])/d['inttime']).astype(int) # insure that lc is set as what is read lightcurve[nskip: nskip+d['readints']] = data_read.mean(axis=1) return phasecenters, lightcurve def set_pipeline(filename, scan, fileroot='', paramfile='', **kwargs): """ Function defines pipeline state for search. Takes data/scan as input. fileroot is base name for associated products (cal files, noise, cands). if blank, it is set to filename. paramfile is name of file that defines all pipeline parameters (python-like syntax). kwargs used to overload paramfile definitions. Many parameters take 0 as default, which auto-defines ideal parameters. This definition does not yet consider memory/cpu/time limitations. nsegments defines how to break jobs in time. nchunk defines how many jobs are sent to nthreads. 
""" workdir = os.path.dirname(os.path.abspath(filename)) filename = filename.rstrip('/') assert os.path.exists(filename) # then get all metadata if os.path.exists(os.path.join(filename, 'Main.xml')): d = ps.get_metadata(filename, scan, paramfile=paramfile, **kwargs) # can take file name or Params instance d['dataformat'] = 'sdm' else: d = pm.get_metadata(filename, scan, paramfile=paramfile, **kwargs) d['dataformat'] = 'ms' # set version d['rtpipe_version'] = __version__ # define rootname for in/out cal/products if fileroot: d['fileroot'] = fileroot else: d['fileroot'] = os.path.basename(os.path.abspath(filename)) # autodetect calibration products locally if not d['gainfile'] or not os.path.exists(d['gainfile']): # first try to get CASA gain file gainfilelist = glob.glob(os.path.join(d['workdir'], d['fileroot'] + '.g?')) bpfilelist = glob.glob(os.path.join(d['workdir'], d['fileroot'] + '.b?')) # if not in workdir, look locally if not gainfilelist or not bpfilelist: gainfilelist = glob.glob(d['fileroot'] + '.g?') bpfilelist = glob.glob(d['fileroot'] + '.b?') if gainfilelist and bpfilelist: gainfilelist.sort() d['gainfile'] = gainfilelist[-1] logger.info('Autodetected CASA gainfile %s' % d['gainfile']) bpfilelist.sort() d['bpfile'] = bpfilelist[-1] logger.info('Autodetected CASA bpfile %s' % d['bpfile']) # if that fails, look for telcal file filelist = glob.glob(os.path.join(d['workdir'], filename + '.GN')) if not filelist: filelist = glob.glob(filename + '.GN') if filelist: d['gainfile'] = filelist[0] logger.info('Autodetected telcal file %s' % d['gainfile']) if not os.path.exists(d['gainfile']): logger.warn('Calibration file autodetection failed for gainfile {0}'.format(d['gainfile'])) # define features d['featureind'] = ['segment', 'int', 'dmind', 'dtind', 'beamnum'] # feature index. should be stable. if 'features' not in d: if d['searchtype'] == 'image1': d['features'] = ['snr1', 'immax1', 'l1', 'm1'] # features returned by image1 elif d['searchtype'] == 'image1snip': d['features'] = ['snr1', 'immax1', 'l1', 'm1', 'im40', 'spec20'] elif d['searchtype'] == 'image1stats': d['features'] = ['snr1', 'immax1', 'l1', 'm1', 'specstd', 'specskew', 'speckurtosis', 'imskew', 'imkurtosis'] # note: spec statistics are all or nothing. 
elif 'image2' in d['searchtype']: d['features'] = ['snr1', 'immax1', 'l1', 'm1', 'snr2', 'immax2', 'l2', 'm2'] # features returned by image1 # set imaging parameters to use if d['uvres'] == 0: d['uvres'] = d['uvres_full'] else: urange = d['urange'][scan]*(d['freq'].max()/d['freq_orig'][0]) # uvw from get_uvw already in lambda at ch0 vrange = d['vrange'][scan]*(d['freq'].max()/d['freq_orig'][0]) powers = n.fromfunction(lambda i,j: 2**i*3**j, (14,10), dtype='int') # power array for 2**i * 3**j rangex = n.round(d['uvoversample']*urange).astype('int') rangey = n.round(d['uvoversample']*vrange).astype('int') largerx = n.where(powers-rangex/d['uvres'] > 0, powers, powers[-1,-1]) p2x, p3x = n.where(largerx == largerx.min()) largery = n.where(powers-rangey/d['uvres'] > 0, powers, powers[-1,-1]) p2y, p3y = n.where(largery == largery.min()) d['npixx_full'] = (2**p2x * 3**p3x)[0] d['npixy_full'] = (2**p2y * 3**p3y)[0] # set number of pixels to image d['npixx'] = d['npixx_full'] d['npixy'] = d['npixy_full'] if 'npix_max' in d: if d['npix_max']: d['npixx'] = min(d['npix_max'], d['npixx_full']) d['npixy'] = min(d['npix_max'], d['npixy_full']) if d['npix']: d['npixx'] = d['npix'] d['npixy'] = d['npix'] else: d['npix'] = max(d['npixx'], d['npixy']) # this used to define fringe time # define dmarr, if not already if len(d['dmarr']) == 0: if d.has_key('dm_maxloss') and d.has_key('maxdm') and d.has_key('dm_pulsewidth'): d['dmarr'] = calc_dmgrid(d, maxloss=d['dm_maxloss'], maxdm=d['maxdm'], dt=d['dm_pulsewidth']) if d['maxdm'] > 0: logger.info('Calculated %d dms for max sensitivity loss %.2f, maxdm %d pc/cm3, and pulse width %d ms' % (len(d['dmarr']), d['dm_maxloss'], d['maxdm'], d['dm_pulsewidth']/1000)) else: d['dmarr'] = [0] logger.info('Can\'t calculate dm grid without dm_maxloss, maxdm, and dm_pulsewidth defined. Setting to [0].') # define times for data to read d['t_overlap'] = rtlib.calc_delay(d['freq'], d['inttime'], max(d['dmarr'])).max()*d['inttime'] # time of overlap for total dm coverage at segment boundaries d['datadelay'] = [rtlib.calc_delay(d['freq'], d['inttime'],dm).max() for dm in d['dmarr']] d['nints'] = d['nints'] - d['nskip'] # pols if d.has_key('selectpol'): d['pols'] = [pol for pol in d['pols_orig'] if pol in d['selectpol']] else: d['pols'] = d['pols_orig'] d['npol'] = len(d['pols']) # split imaging into chunks. ideally one per thread, but can modify to fit available memory if d['nchunk'] == 0: d['nchunk'] = d['nthread'] # if nsegments is 0, then auto-define within memory limit if not d['nsegments']: fringetime = calc_fringetime(d) d['nsegments'] = max(1, min(d['nints'], int(d['scale_nsegments']*d['inttime']*d['nints']/(fringetime-d['t_overlap'])))) # at least 1, at most nints calc_segment_times(d) # if auto nsegment definition makes segment too large, try to scale it down to fit in memory_limit (if provided) # limit defined for dm sweep time and max nchunk/nthread ratio if d.has_key('memory_limit'): (vismem0, immem0) = calc_memory_footprint(d, limit=True) assert vismem0+immem0 < d['memory_limit'], 'memory_limit of {0} is smaller than best solution of {1}. Try forcing nsegments/nchunk larger than {2}/{3} or reducing maxdm/npix'.format(d['memory_limit'], vismem0+immem0, d['nsegments'], max(d['dtarr'])/min(d['dtarr'])) (vismem, immem) = calc_memory_footprint(d) if vismem+immem > d['memory_limit']: logger.info('Over memory limit of {4} when reading {0} segments with {1} chunks ({2}/{3} GB for visibilities/imaging). 
Searching for solution down to {5}/{6} GB...'.format(d['nsegments'], d['nchunk'], vismem, immem, d['memory_limit'], vismem0, immem0)) while vismem+immem > d['memory_limit']: (vismem, immem) = calc_memory_footprint(d) logger.debug('Using {0} segments with {1} chunks ({2}/{3} GB for visibilities/imaging). Searching for better solution...'.format(d['nchunk'], vismem, immem, d['memory_limit'])) d['scale_nsegments'] = d['scale_nsegments'] * (vismem+immem)/float(d['memory_limit']) d['nsegments'] = max(1, min(d['nints'], int(d['scale_nsegments']*d['inttime']*d['nints']/(fringetime-d['t_overlap'])))) # at least 1, at most nints calc_segment_times(d) (vismem, immem) = calc_memory_footprint(d) while vismem+immem > d['memory_limit']: logger.debug('Doubling nchunk from %d to fit in %d GB memory limit.' % (d['nchunk'], d['memory_limit'])) d['nchunk'] = 2*d['nchunk'] (vismem, immem) = calc_memory_footprint(d) if d['nchunk'] >= max(d['dtarr'])/min(d['dtarr'])*d['nthread']: # limit nchunk/nthread to at most the range in dt d['nchunk'] = d['nthread'] break (vismem, immem) = calc_memory_footprint(d) # final set up of memory calc_segment_times(d) (vismem, immem) = calc_memory_footprint(d) # scaling of number of integrations beyond dt=1 assert all(d['dtarr']) and (d['dtarr'] == sorted(d['dtarr'])), 'dtarr must be larger than 0 and in increasing order' # calculate number of thermal noise candidates per segment nfalse = calc_nfalse(d) logger.info('') logger.info('Pipeline summary:') if '.GN' in d['gainfile']: logger.info('\t Products saved with %s. telcal calibration with %s' % (d['fileroot'], os.path.basename(d['gainfile']))) else: logger.info('\t Products saved with %s. CASA calibration files (%s, %s)' % (d['fileroot'], os.path.basename(d['gainfile']), os.path.basename(d['bpfile']))) logger.info('\t Using %d segment%s of %d ints (%.1f s) with overlap of %.1f s' % (d['nsegments'], "s"[not d['nsegments']-1:], d['readints'], d['t_segment'], d['t_overlap'])) if d['t_overlap'] > d['t_segment']/3.: logger.info('\t\t Lots of segments needed, since Max DM sweep (%.1f s) close to segment size (%.2f s)' % (d['t_overlap'], d['t_segment'])) logger.info('\t Downsampling in time/freq by %d/%d and skipping %d ints from start of scan.' % (d['read_tdownsample'], d['read_fdownsample'], d['nskip'])) logger.info('\t Excluding ants %s' % (d['excludeants'])) logger.info('\t Using pols %s' % (d['pols'])) logger.info('') logger.info('\t Search with %s and threshold %.1f.' % (d['searchtype'], d['sigma_image1'])) logger.info('\t Using %d DMs from %.1f to %.1f and dts %s.' % (len(d['dmarr']), min(d['dmarr']), max(d['dmarr']), d['dtarr'])) logger.info('\t Using uvgrid npix=(%d,%d) and res=%d.' % (d['npixx'], d['npixy'], d['uvres'])) logger.info('\t Expect %d thermal false positives per segment.' % nfalse) logger.info('') logger.info('\t Visibility memory usage is %.1f GB/segment' % vismem) logger.info('\t Imaging in %d chunk%s using max of %.1f GB/segment' % (d['nchunk'], "s"[not d['nsegments']-1:], immem)) logger.info('\t Grand total memory usage: %.1f GB/segment' % (vismem + immem)) return d def getcandsfile(d, segment=-1, domock=False): """ Return name of candsfile for a given dictionary. Must have d['segment'] defined. domock is option to save simulated cands. 
""" if domock: prefix = 'candsmock_' else: prefix= 'cands_' if d.has_key('segment'): return os.path.join(d['workdir'], prefix + d['fileroot'] + '_sc' + str(d['scan']) + 'seg' + str(d['segment']) + '.pkl') elif segment >= 0: return os.path.join(d['workdir'], prefix + d['fileroot'] + '_sc' + str(d['scan']) + 'seg' + str(segment) + '.pkl') else: return '' def getnoisefile(d, segment=-1): """ Return name of noisefile for a given dictionary. Must have d['segment'] defined. """ if d.has_key('segment'): return os.path.join(d['workdir'], 'noise_' + d['fileroot'] + '_sc' + str(d['scan']) + 'seg' + str(d['segment']) + '.pkl') elif segment >= 0: return os.path.join(d['workdir'], 'noise_' + d['fileroot'] + '_sc' + str(d['scan']) + 'seg' + str(segment) + '.pkl') else: return '' def calc_nfalse(d): """ Calculate the number of thermal-noise false positives per segment. """ dtfactor = n.sum([1./i for i in d['dtarr']]) # assumes dedisperse-all algorithm ntrials = d['readints'] * dtfactor * len(d['dmarr']) * d['npixx'] * d['npixy'] qfrac = 1 - (erf(d['sigma_image1']/n.sqrt(2)) + 1)/2. nfalse = int(qfrac*ntrials) return nfalse def calc_segment_times(d): """ Helper function for set_pipeline to define segmenttimes list, given nsegments definition """ # this casts to int (flooring) to avoid 0.5 int rounding issue. stopdts = n.linspace(d['nskip']+d['t_overlap']/d['inttime'], d['nints'], d['nsegments']+1)[1:] # nseg+1 assures that at least one seg made startdts = n.concatenate( ([d['nskip']], stopdts[:-1]-d['t_overlap']/d['inttime']) ) segmenttimes = [] for (startdt, stopdt) in zip(d['inttime']*startdts, d['inttime']*stopdts): starttime = qa.getvalue(qa.convert(qa.time(qa.quantity(d['starttime_mjd']+startdt/(24*3600),'d'),form=['ymd'], prec=9)[0], 's'))[0]/(24*3600) stoptime = qa.getvalue(qa.convert(qa.time(qa.quantity(d['starttime_mjd']+stopdt/(24*3600), 'd'), form=['ymd'], prec=9)[0], 's'))[0]/(24*3600) segmenttimes.append((starttime, stoptime)) d['segmenttimes'] = n.array(segmenttimes) totaltimeread = 24*3600*(d['segmenttimes'][:, 1] - d['segmenttimes'][:, 0]).sum() # not guaranteed to be the same for each segment d['readints'] = n.round(totaltimeread / (d['inttime']*d['nsegments'])).astype(int) d['t_segment'] = totaltimeread/d['nsegments'] def calc_memory_footprint(d, headroom=4., visonly=False, limit=False): """ Given pipeline state dict, this function calculates the memory required to store visibilities and make images. headroom scales visibility memory size from single data object to all copies (and potential file read needs) limit=True returns a the minimum memory configuration Returns tuple of (vismem, immem) in units of GB. """ toGB = 8/1024.**3 # number of complex64s to GB d0 = d.copy() # limit defined for dm sweep time and max nchunk/nthread ratio if limit: d0['readints'] = d['t_overlap']/d['inttime'] d0['nchunk'] = max(d['dtarr'])/min(d['dtarr']) * d['nthread'] vismem = headroom * datasize(d0) * toGB if visonly: return vismem else: immem = d0['nthread'] * (d0['readints']/d0['nchunk'] * d0['npixx'] * d0['npixy']) * toGB return (vismem, immem) def calc_fringetime(d): """ Estimate largest time span of a "segment". A segment is the maximal time span that can be have a single bg fringe subtracted and uv grid definition. Max fringe window estimated for 5% amp loss at first null averaged over all baselines. Assumes dec=+90, which is conservative. Returns time in seconds that defines good window. 
""" maxbl = d['uvres']*d['npix']/2 # fringe time for imaged data only fringetime = 0.5*(24*3600)/(2*n.pi*maxbl/25.) # max fringe window in seconds return fringetime def correct_dmdt(d, dmind, dtind, blrange): """ Dedisperses and resamples data *in place*. Drops edges, since it assumes that data is read with overlapping chunks in time. """ data = numpyview(data_mem, 'complex64', datashape(d)) data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) bl0,bl1 = blrange data_resamp[:, bl0:bl1] = data[:, bl0:bl1] rtlib.dedisperse_resample(data_resamp, d['freq'], d['inttime'], d['dmarr'][dmind], d['dtarr'][dtind], blrange, verbose=0) # dedisperses data. def correct_dm(d, dm, blrange): """ Dedisperses data into data_resamp Drops edges, since it assumes that data is read with overlapping chunks in time. """ data = numpyview(data_mem, 'complex64', datashape(d)) data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) bl0,bl1 = blrange data_resamp[:, bl0:bl1] = data[:, bl0:bl1] rtlib.dedisperse_par(data_resamp, d['freq'], d['inttime'], dm, blrange, verbose=0) # dedisperses data. def correct_dt(d, dt, blrange): """ Resamples data_resamp Drops edges, since it assumes that data is read with overlapping chunks in time. """ data = numpyview(data_mem, 'complex64', datashape(d)) data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) bl0,bl1 = blrange rtlib.resample_par(data_resamp, d['freq'], d['inttime'], dt, blrange, verbose=0) # dedisperses data. def calc_lm(d, im=[], pix=(), minmax='max'): """ Helper function to calculate location of image pixel in (l,m) coords. Assumes peak pixel, but input can be provided in pixel units. minmax defines whether to look for image maximum or minimum. """ if len(pix) == 0: # default is to get pixel from image if minmax == 'max': peakl, peakm = n.where(im == im.max()) elif minmax == 'min': peakl, peakm = n.where(im == im.min()) peakl = peakl[0]; peakm = peakm[0] elif len(pix) == 2: # can also specify peakl, peakm = pix if len(im): npixx, npixy = im.shape else: npixx = d['npixx'] npixy = d['npixy'] l1 = (npixx/2. - peakl)/(npixx*d['uvres']) m1 = (npixy/2. - peakm)/(npixy*d['uvres']) return l1, m1 def move_phasecenter(d, l1, m1, u, v): """ Handler function for phaseshift_threaded """ logger.info('Rephasing data to (l, m)=(%.4f, %.4f).' % (l1, m1)) data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) rtlib.phaseshift_threaded(data_resamp, d, l1, m1, u, v) def calc_dmgrid(d, maxloss=0.05, dt=3000., mindm=0., maxdm=0.): """ Function to calculate the DM values for a given maximum sensitivity loss. maxloss is sensitivity loss tolerated by dm bin width. dt is assumed pulse width in microsec. """ # parameters tsamp = d['inttime']*1e6 # in microsec k = 8.3 freq = d['freq'].mean() # central (mean) frequency in GHz bw = 1e3*(d['freq'][-1] - d['freq'][0]) ch = 1e3*(d['freq'][1] - d['freq'][0]) # channel width in MHz # width functions and loss factor dt0 = lambda dm: n.sqrt(dt**2 + tsamp**2 + ((k*dm*ch)/(freq**3))**2) dt1 = lambda dm, ddm: n.sqrt(dt**2 + tsamp**2 + ((k*dm*ch)/(freq**3))**2 + ((k*ddm*bw)/(freq**3.))**2) loss = lambda dm, ddm: 1 - n.sqrt(dt0(dm)/dt1(dm,ddm)) loss_cordes = lambda ddm, dfreq, dt, freq: 1 - (n.sqrt(n.pi) / (2 * 6.91e-3 * ddm * dfreq / (dt*freq**3))) * erf(6.91e-3 * ddm * dfreq / (dt*freq**3)) # not quite right for underresolved pulses if maxdm == 0: return [0] else: # iterate over dmgrid to find optimal dm values. go higher than maxdm to be sure final list includes full range. 
dmgrid = n.arange(mindm, maxdm, 0.05) dmgrid_final = [dmgrid[0]] for i in range(len(dmgrid)): ddm = (dmgrid[i] - dmgrid_final[-1])/2. ll = loss(dmgrid[i],ddm) if ll > maxloss: dmgrid_final.append(dmgrid[i]) return dmgrid_final def image1(d, u, v, w, dmind, dtind, beamnum, irange): """ Parallelizable function for imaging a chunk of data for a single dm. Assumes data is dedispersed and resampled, so this just images each integration. Simple one-stage imaging that returns dict of params. returns dictionary with keys of cand location and values as tuple of features """ i0, i1 = irange data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) # logger.info('i0 {0}, i1 {1}, dm {2}, dt {3}, len {4}'.format(i0, i1, dmind, dtind, len(data_resamp))) ims,snr,candints = rtlib.imgallfullfilterxyflux(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data_resamp[i0:i1], d['npixx'], d['npixy'], d['uvres'], d['sigma_image1']) # logger.info('finished imaging candints {0}'.format(candints)) feat = {} for i in xrange(len(candints)): if snr[i] > 0: l1, m1 = calc_lm(d, ims[i], minmax='max') else: l1, m1 = calc_lm(d, ims[i], minmax='min') logger.info('Got one! Int=%d, DM=%d, dt=%d: SNR_im=%.1f @ (%.2e,%.2e).' % ((i0+candints[i])*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], l1, m1)) candid = (d['segment'], (i0+candints[i])*d['dtarr'][dtind], dmind, dtind, beamnum) # logger.info(candid) # assemble feature in requested order ff = [] for feature in d['features']: if feature == 'snr1': ff.append(snr[i]) elif feature == 'immax1': if snr[i] > 0: ff.append(ims[i].max()) else: ff.append(ims[i].min()) elif feature == 'l1': ff.append(l1) elif feature == 'm1': ff.append(m1) elif feature == 'im40': # 40 pixel image peak cutout peakx, peaky = n.where(ims[i] == ims[i].max()) sizex, sizey = ims[i].shape # set image window with min=0 and max=size xmin = max(0, peakx - 20); xmax = min(peakx + 20, sizex) ymin = max(0, peaky - 20); ymax = min(peaky + 20, sizey) ff.append(ims[i][xmin:xmax,ymin:ymax]) elif feature == 'spec20': # 20 int spectrum cutout # set int window with min 0 and max len() imin = max(0, (i0+candints[i])*d['dtarr'][dtind] - 10) imax = min( (i0+candints[i])*d['dtarr'][dtind] + 10, len(data_resamp)) data_cut = data_resamp[imin:imax].copy() rtlib.phaseshift_threaded(data_cut, d, l1, m1, u, v) ff.append(data_cut.mean(axis=1)) elif feature in ['specstd', 'specskew', 'speckurtosis']: # this is standard set and must all appear together if feature == 'specstd': # first this one, then others will use same data seli = (i0+candints[i])*d['dtarr'][dtind] datasel = data_resamp[seli:seli+1].copy() rtlib.phaseshift_threaded(datasel, d, l1, m1, u, v) data = n.ma.masked_equal(datasel, 0j) spec = data.mean(axis=3).mean(axis=1).mean(axis=0).real std = spec.std(axis=0) ff.append(std) elif feature == 'specskew': skew = float(mstats.skew(spec)) ff.append(skew) elif feature == 'speckurtosis': kurtosis = float(mstats.kurtosis(spec)) ff.append(kurtosis) elif feature == 'imskew': skew = float(mstats.skew(ims[i].flatten())) ff.append(skew) elif feature == 'imkurtosis': kurtosis = float(mstats.kurtosis(ims[i].flatten())) ff.append(kurtosis) feat[candid] = list(ff) return feat def image2(d, i0, i1, u, v, w, dmind, dtind, beamnum): """ Parallelizable function for imaging a chunk of data for a single dm. Assumes data is dedispersed and resampled, so this just images each integration. Two-stage imaging uses ideal uv coverage in second image. 
returns dictionary with keys of cand location and values as tuple of features """ data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) ims,snr,candints = rtlib.imgallfullfilterxy(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data_resamp[i0:i1], d['npixx'], d['npixy'], d['uvres'], d['sigma_image1']) feat = {} for i in xrange(len(candints)): # reimage im2 = rtlib.imgonefullxy(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data_resamp[i0+candints[i]], d['npixx_full'], d['npixy_full'], d['uvres'], verbose=0) # find most extreme pixel snrmax = im2.max()/im2.std() snrmin = im2.min()/im2.std() if snrmax >= abs(snrmin): snr2 = snrmax else: snr2 = snrmin # threshold if abs(snr2) > d['sigma_image2']: # calc loc in first image if snr[i] > 0: l1, m1 = calc_lm(d, ims[i], minmax='max') else: l1, m1 = calc_lm(d, ims[i], minmax='min') # calc src loc in second image if snr2 > 0: l2, m2 = calc_lm(d, im2, minmax='max') else: l2, m2 = calc_lm(d, im2, minmax='min') logger.info('Got one! Int=%d, DM=%d, dt=%d: SNR_im1=%.1f, SNR_im2=%.1f @ (%.2e,%.2e).' % ((i0+candints[i])*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], snr2, l2, m2)) candid = (d['segment'], (i0+candints[i])*d['dtarr'][dtind], dmind, dtind, beamnum) # assemble feature in requested order ff = [] for feature in d['features']: if feature == 'snr1': ff.append(snr[i]) elif feature == 'immax1': if snr[i] > 0: ff.append(ims[i].max()) else: ff.append(ims[i].min()) elif feature == 'l1': ff.append(l1) elif feature == 'm1': ff.append(m1) elif feature == 'snr2': ff.append(snr2) elif feature == 'immax2': if snr2 > 0: ff.append(im2.max()) else: ff.append(im2.min()) elif feature == 'l2': ff.append(l2) elif feature == 'm2': ff.append(m2) feat[candid] = list(ff) else: logger.info('Almost... Int=%d, DM=%d, dt=%d: SNR_im1=%.1f, SNR_im2=%.1f.' % ((i0+candints[i])*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], snr2)) return feat def image2w(d, i0, i1, u, v, w, dmind, dtind, beamnum, bls, uvkers): """ Parallelizable function for imaging a chunk of data for a single dm. Assumes data is dedispersed and resampled, so this just images each integration. Two-stage imaging uses ideal uv coverage in second image. returns dictionary with keys of cand location and values as tuple of features """ data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) ims,snr,candints = rtlib.imgallfullfilterxy(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data_resamp[i0:i1], d['npixx'], d['npixy'], d['uvres'], d['sigma_image1']) feat = {} for i in xrange(len(candints)): # reimage npix = max(d['npixx_full'], d['npixy_full']) im2 = rtlib.imgonefullw(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data_resamp[i0+candints[i]], npix, d['uvres'], bls, uvkers, verbose=1) # find most extreme pixel snrmax = im2.max()/im2.std() snrmin = im2.min()/im2.std() if snrmax >= abs(snrmin): snr2 = snrmax else: snr2 = snrmin # threshold if abs(snr2) > d['sigma_image2']: # calc loc in first image if snr[i] > 0: l1, m1 = calc_lm(d, ims[i], minmax='max') else: l1, m1 = calc_lm(d, ims[i], minmax='min') # calc src loc in second image if snr2 > 0: l2, m2 = calc_lm(d, im2, minmax='max') else: l2, m2 = calc_lm(d, im2, minmax='min') logger.info('Got one! Int=%d, DM=%d, dt=%d: SNR_im1=%.1f, SNR_im2=%.1f @ (%.2e,%.2e).' 
% ((i0+candints[i])*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], snr2, l2, m2)) candid = (d['segment'], (i0+candints[i])*d['dtarr'][dtind], dmind, dtind, beamnum) # assemble feature in requested order ff = [] for feature in d['features']: if feature == 'snr1': ff.append(snr[i]) elif feature == 'immax1': if snr[i] > 0: ff.append(ims[i].max()) else: ff.append(ims[i].min()) elif feature == 'l1': ff.append(l1) elif feature == 'm1': ff.append(m1) elif feature == 'snr2': ff.append(snr2) elif feature == 'immax2': if snr2 > 0: ff.append(im2.max()) else: ff.append(im2.min()) elif feature == 'l2': ff.append(l2) elif feature == 'm2': ff.append(m2) feat[candid] = list(ff) else: logger.info('Almost... Int=%d, DM=%d, dt=%d: SNR_im1=%.1f, SNR_im2=%.1f.' % ((i0+candints[i])*d['dtarr'][dtind], d['dmarr'][dmind], d['dtarr'][dtind], snr[i], snr2)) return feat def image1wrap(d, u, v, w, npixx, npixy, candint): """ Parallelizable function for imaging a chunk of data for a single dm. Assumes data is dedispersed and resampled, so this just images each integration. Simple one-stage imaging that returns dict of params. returns dictionary with keys of cand location and values as tuple of features """ data_resamp = numpyview(data_resamp_mem, 'complex64', datashape(d)) image = rtlib.imgonefullxy(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data_resamp[candint], npixx, npixy, d['uvres'], verbose=1) return image def imagearm(sdmfile, scan, segment, npix=512, res=50, **kwargs): """ Function to do end-to-end 1d, arm-based imaging """ import sdmpy sdm = sdmpy.SDM(sdmfile) ants = {ant.stationId:ant.name for ant in sdm['Antenna']} stations = {st.stationId: st.name for st in sdm['Station'] if 'X' not in str(st.name)} west = [int(str(ants[st]).lstrip('ea')) for st in stations if 'W' in str(stations[st])] east = [int(str(ants[st]).lstrip('ea')) for st in stations if 'E' in str(stations[st])] north = [int(str(ants[st]).lstrip('ea')) for st in stations if 'N' in str(stations[st])] d = set_pipeline(sdmfile, scan, **kwargs) blarr = rtlib.calc_blarr(d) selwest = [i for i in range(len(blarr)) if all([b in west for b in blarr[i]])] seleast = [i for i in range(len(blarr)) if all([b in east for b in blarr[i]])] selnorth = [i for i in range(len(blarr)) if all([b in north for b in blarr[i]])] u,v,w = ps.get_uvw_segment(d, segment=segment) data = pipeline_reproduce(d, segment=segment, product='data') dataw = data[:,selwest].mean(axis=3).mean(axis=2) datae = data[:,seleast].mean(axis=3).mean(axis=2) datan = data[:,selnorth].mean(axis=3).mean(axis=2) uw = u[selwest] ue = u[seleast] un = u[selnorth] vw = v[selwest] ve = v[seleast] vn = v[selnorth] grid = n.zeros((len(data), npix), dtype='complex64') grid2 = n.zeros((len(data), npix), dtype='float32') datalist = [] for (uu, vv, dd) in [(uw, vw, dataw), (ue, ve, datae), (un, vn, datan)]: # uu = n.round(uu/res).astype(int) # vv = n.round(vv/res).astype(int) uu = n.mod(uu/res, npix) vv = n.mod(vv/res, npix) uv = n.sqrt(uu**2 + vv**2) uv = n.round(uv).astype(int) for i in range(len(uv)): if uv[i] < 512: grid[:, uv[i]] = dd[:, i] grid2 = n.fft.ifft(grid, axis=1).real datalist.append(grid2) return datalist def sample_image(d, data, u, v, w, i=-1, verbose=0, imager='xy', wres=100): """ Samples one integration and returns image i is integration to image. Default is mid int. 
""" if i == -1: i = len(data)/2 if imager == 'xy': image = rtlib.imgonefullxy(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], d['npixx'], d['npixy'], d['uvres'], verbose=verbose) elif imager == 'w': npix = max(d['npixx'], d['npixy']) bls, uvkers = rtlib.genuvkernels(w, wres, npix, d['uvres'], ksize=21, oversample=1) image = rtlib.imgonefullw(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], npix, d['uvres'], bls, uvkers, verbose=verbose) # bls, lmkers = rtlib.genlmkernels(w, wres, npix, d['uvres']) # image = rtlib.imgonefullw(n.outer(u, d['freq']/d['freq_orig'][0]), n.outer(v, d['freq']/d['freq_orig'][0]), data[i], npix, d['uvres'], [bls[0]], [lmkers[0]], verbose=verbose) return image def estimate_noiseperbl(data): """ Takes large data array and sigma clips it to find noise per bl for input to detect_bispectra. Takes mean across pols and channels for now, as in detect_bispectra. """ # define noise per baseline for data seen by detect_bispectra or image datamean = data.mean(axis=2).imag # use imaginary part to estimate noise without calibrated, on-axis signal (datameanmin, datameanmax) = rtlib.sigma_clip(datamean.flatten()) good = n.where( (datamean>datameanmin) & (datamean<datameanmax) ) noiseperbl = datamean[good].std() # measure single noise for input to detect_bispectra logger.debug('Clipped to %d%% of data (%.3f to %.3f). Noise = %.3f.' % (100.*len(good[0])/len(datamean.flatten()), datameanmin, datameanmax, noiseperbl)) return noiseperbl def noisepickle(d, data, u, v, w, chunk=200): """ Calculates noise properties and saves values to pickle. chunk defines window for measurement. at least one measurement always made. """ if d['savenoise']: noisefile = getnoisefile(d) if os.path.exists(noisefile): logger.warn('noisefile %s already exists' % noisefile) else: nints = len(data) chunk = min(chunk, nints) # ensure at least one measurement results = [] rr = range(0, nints, chunk) if len(rr) == 1: rr.append(1) # hack. need to make sure it iterates for nints=1 case for i in range(len(rr)-1): imid = (rr[i]+rr[i+1])/2 noiseperbl = estimate_noiseperbl(data[rr[i]:rr[i+1]]) imstd = sample_image(d, data, u, v, w, imid, verbose=0).std() zerofrac = float(len(n.where(data[rr[i]:rr[i+1]] == 0j)[0]))/data[rr[i]:rr[i+1]].size results.append( (d['segment'], noiseperbl, zerofrac, imstd) ) with open(noisefile, 'a') as pkl: pickle.dump(results, pkl) logger.info('Wrote %d noise measurement%s to %s.' % (len(results), 's'[:len(results)-1], noisefile)) def savecands(d, cands, domock=False): """ Save all candidates in pkl file for later aggregation and filtering. domock is option to save simulated cands file """ with open(getcandsfile(d, domock=domock), 'w') as pkl: pickle.dump(d, pkl) pickle.dump(cands, pkl) def datashape(d): return (d['readints']/d['read_tdownsample'], d['nbl'], d['nchan']/d['read_fdownsample'], d['npol']) def datasize(d): return long(d['readints']*d['nbl']*d['nchan']*d['npol']/(d['read_tdownsample']*d['read_fdownsample'])) def numpyview(arr, datatype, shape, raw=False): """ Takes mp shared array and returns numpy array with given shape. 
""" if raw: return n.frombuffer(arr, dtype=n.dtype(datatype)).view(n.dtype(datatype)).reshape(shape) # for shared mps.RawArray else: return n.frombuffer(arr.get_obj(), dtype=n.dtype(datatype)).view(n.dtype(datatype)).reshape(shape) # for shared mp.Array def initreadonly(shared_arr_): global data_read_mem data_read_mem = shared_arr_ # must be inhereted, not passed as an argument def initresamp(shared_arr_, shared_arr2_): global data_mem, data_resamp_mem data_mem = shared_arr_ data_resamp_mem = shared_arr2_ def initread(shared_arr1_, shared_arr2_, shared_arr3_, shared_arr4_, shared_arr5_, shared_arr6_, shared_arr7_, shared_arr8_): global data_read_mem, u_read_mem, v_read_mem, w_read_mem, data_mem, u_mem, v_mem, w_mem data_read_mem = shared_arr1_ # must be inhereted, not passed as an argument u_read_mem = shared_arr2_ v_read_mem = shared_arr3_ w_read_mem = shared_arr4_ data_mem = shared_arr5_ u_mem = shared_arr6_ v_mem = shared_arr7_ w_mem = shared_arr8_
import glob
import os

from ez_trans import compress, carrier, config, logger


class engine(object):
    """Work engine in charge of compressing and transferring files."""

    log = logger.SimpleLogger()

    def __init__(self, config, log=None):
        self.config = config
        if log:
            self.log = log

    def do_transfer(self, folder, relpath, zip_name, volume_size):
        """Compress the source folder and then transfer the archive."""
        #
        # Compress
        #
        relative_path = os.path.relpath(folder, self.config.base_dir)
        source = os.path.join(folder, relpath)
        work_folder = os.path.join(self.config.working_folder, relative_path, self.config.key)
        zip_file = os.path.join(work_folder, zip_name)

        zipper = compress.compresser_7z(self.config)
        result = zipper.compress(folder, relpath, zip_file, volume_size)
        if not result:
            self.log.error("failed to create compressed file, please check logs!")
            return False

        zip_files = glob.glob(zipper.zip_file + '*')
        self.log.message("zip files created: %s" % zip_files)

        #
        # Copy or FTP? FTP only for now.
        #
        # Transfer the compressed files by FTP.
        #
        target = self.config.trans_target
        transfer = carrier.ftp_carrier(target.server, target.user, target.password, target.port)
        ftp_folder = os.path.join(self.config.trans_target.relative_path, relative_path)
        result = transfer.upload(zip_files, ftp_folder)

        if result:
            self.log.message('upload done. cleaning up zip files %s' % zip_files)
            # delete the compressed files on this server once the upload succeeds
            try:
                for zip_path in zip_files:
                    os.remove(zip_path)
            except Exception as e:
                self.log.error('error in cleanup. e=%s' % e)
        else:
            self.log.error("upload failed! please check target and permission")
            return False

        return True
from django.urls import path
from . import views
from .views import MyTokenObtainPairView
from rest_framework_simplejwt.views import (
    TokenRefreshView,
)

urlpatterns = [
    path('', views.getRoutes),
    path('register/', views.register, name="register"),
    path('get-budget/', views.getBudget, name='getBudget'),
    path('set-budget/', views.setBudget, name='setBudget'),
    path('add-person/', views.addPerson, name='addPerson'),
    path('get-person/', views.getPerson, name='getPerson'),
    path('add-gift/', views.addGift, name='addGift'),
    path('delete-gift/<str:id>/', views.deleteGift, name='deleteGift'),
    path('gifts/', views.getGifts),
    path('add-tracking/', views.addTrackingData, name='addTrackingData'),
    path('get-tracking/', views.getTrackingData, name='getTrackingData'),
    path('send-message/', views.sendMessage, name='sendMessage'),
    path('token/', MyTokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
]
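
# --- Illustrative sketch (not part of this app) ---
# The urlpatterns above expose a SimpleJWT token pair endpoint plus several API
# views. The snippet below shows one way a client might authenticate and call
# an endpoint. The host, the "/api" mount point, and the username/password are
# hypothetical assumptions; only the route names come from the urlpatterns above.
import requests

BASE = "http://localhost:8000/api"  # assumed mount point for these urlpatterns

# Obtain an access/refresh pair from MyTokenObtainPairView
tokens = requests.post(f"{BASE}/token/",
                       json={"username": "demo", "password": "demo-pass"}).json()

# Call a view with the access token, then refresh when it expires
headers = {"Authorization": f"Bearer {tokens['access']}"}
print(requests.get(f"{BASE}/gifts/", headers=headers).status_code)

refreshed = requests.post(f"{BASE}/token/refresh/",
                          json={"refresh": tokens["refresh"]}).json()
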
#!/usr/bin/env python
__author__ = 'litrin.jiang@intel.com'

import os

import tornado.httpclient
import tornado.ioloop
import tornado.web
from tornado.options import define, options, parse_command_line

define("api_url", default="http://10.239.163.50:888", help="api url")
define("port", default="80", help="service port")
define("debug", default="", help="debug mode")
define("pid_file", default="api2api.pid", help="pid file")


class SwitchOperation(tornado.web.RequestHandler):

    @tornado.web.asynchronous
    def get(self):
        http_client = tornado.httpclient.AsyncHTTPClient()
        http_client.fetch(self.get_url(), callback=self.on_fetch)

    @tornado.web.asynchronous
    def post(self):
        body = self.request.body
        http_client = tornado.httpclient.AsyncHTTPClient()
        http_client.fetch(self.get_url(), method='POST', body=body, callback=self.on_fetch)

    def get_url(self):
        url_base = options.api_url
        return url_base + self.request.path

    def on_fetch(self, response):
        self.write(str(response.body))
        self.finish()


def save_pid(pid_file, pid):
    with open(pid_file, "w") as f:
        f.write(str(pid))


def main():
    parse_command_line()

    # daemonize unless running in debug mode; the parent records the child pid and exits
    if options.debug == "":
        pid = os.fork()
        if pid != 0:
            save_pid(options.pid_file, pid)
            exit()

    application = tornado.web.Application([
        (r".*", SwitchOperation),
    ])
    application.listen(int(options.port))
    tornado.ioloop.IOLoop.current().start()


if __name__ == "__main__":
    main()
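
# --- Illustrative sketch (not part of the proxy above) ---
# The handler above forwards any GET/POST path it receives to options.api_url
# unchanged and echoes back the upstream response body. The snippet below shows
# the expected call pattern from a client, assuming the proxy was started with
# hypothetical flags such as:
#   python api2api.py --api_url=http://backend:888 --port=8080 --debug=1
# Host, port and path here are assumptions for illustration only.
from urllib.request import urlopen

# A GET to the proxy is fetched from <api_url> + the same path and returned as-is.
with urlopen("http://localhost:8080/status/health") as resp:
    print(resp.read())
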
import math import os import cv2 import imageio as io import moviepy import moviepy.editor import numpy as np import torch import torch.nn.functional as F import torchvision from tqdm import tqdm from models.disparity_estimation import Disparity, Semantics from models.disparity_refinement import Refine from models.disparity_refinement_pretrained import Refine as RefineP from models.partial_inpainting import Inpaint as PartialInpaint from models.pointcloud_inpainting import Inpaint from utils.common import depth_to_points, process_kenburns from utils.utils import device, load_models, resize_image class Pipeline(): def __init__(self, model_paths=None, partial_inpainting=False, dolly=False, output_frames=False, pretrain=False, d2=False): self.objectCommon = {} self.objectCommon['dblFocal'] = 1024.0/2 self.objectCommon['dblBaseline'] = 120 self.partial_inpainting = partial_inpainting self.dolly = dolly self.output_frames = output_frames self.d2 = d2 self.moduleSemantics = Semantics().to(device).eval() self.moduleDisparity = Disparity().to(device).eval() self.moduleMaskrcnn = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True).to(device).eval() if pretrain: self.moduleRefine = RefineP().to(device).eval() else: self.moduleRefine = Refine().to(device).eval() if self.partial_inpainting: self.moduleInpaint = PartialInpaint().to(device).eval() else: self.moduleInpaint = Inpaint().to(device).eval() models_list = [{'model':self.moduleDisparity, 'type':'disparity'}, {'model':self.moduleRefine, 'type':'refine'}, {'model':self.moduleInpaint, 'type':'inpaint'}] if len(model_paths) == 4: self.moduleInpaintDepth = Inpaint().to(device).eval() models_list.append({'model':self.moduleInpaintDepth, 'type':'inpaint'}) load_models(models_list, model_paths) def __call__(self, tensorImage, zoom_settings, output_path=None, inpaint_depth=False, pretrained_estim=False): tensorImage = tensorImage.to(device) # tensorImage should be (1xCxHxW) only one image is feeded at a time through the pipeline self.objectCommon['intWidth'] = tensorImage.size()[3] self.objectCommon['intHeight'] = tensorImage.size()[2] self.objectCommon['tensorRawImage'] = tensorImage tensorImage = tensorImage.contiguous() tensorResized = resize_image(tensorImage, max_size=int(max(self.objectCommon['intWidth'], self.objectCommon['intHeight'])/2)) tensorDisparity = self.moduleDisparity(tensorResized, self.moduleSemantics(tensorResized)) # depth estimation if self.d2: tensorDisparity = torch.ones_like(tensorDisparity) tensorDisparity = self.moduleRefine(tensorImage, tensorDisparity) # increase resolution if tensorDisparity.min() < 0.0: tensorDisparity -= tensorDisparity.min() tensorDisparity = tensorDisparity / tensorDisparity.max() * self.objectCommon['dblBaseline'] # normalize disparities # Create 3D model from disparity, via depth tensorDepth = (self.objectCommon['dblFocal'] * self.objectCommon['dblBaseline']) / (tensorDisparity + 1e-7) tensorPoints = depth_to_points(tensorDepth, self.objectCommon['dblFocal']) # Delete networks variable to free memory as they are no longer to be used del self.moduleSemantics del self.moduleDisparity del self.moduleMaskrcnn del self.moduleRefine # Store useful data for next steps. 
self.objectCommon['dblDispmin'] = tensorDisparity.min().item() self.objectCommon['dblDispmax'] = tensorDisparity.max().item() self.objectCommon['objectDepthrange'] = cv2.minMaxLoc(src=tensorDepth[0, 0, 128:-128, 128:-128].detach().cpu().numpy(), mask=None) self.objectCommon['tensorRawPoints'] = tensorPoints.view(1, 3, -1) self.objectCommon['tensorRawImage'] = tensorImage self.objectCommon['tensorRawDisparity'] = tensorDisparity self.objectCommon['tensorRawDepth'] = tensorDepth if inpaint_depth: numpyResult = process_kenburns({ 'dblSteps': np.linspace(0.0, 1.0, 75).tolist(), 'objectFrom': zoom_settings['objectFrom'], 'objectTo': zoom_settings['objectTo'], 'boolInpaint': True, 'dolly': self.dolly }, self.objectCommon, [self.moduleInpaint, self.moduleInpaintDepth]) else: numpyResult = process_kenburns({ 'dblSteps': np.linspace(0.0, 1.0, 75).tolist(), 'objectFrom': zoom_settings['objectFrom'], 'objectTo': zoom_settings['objectTo'], 'boolInpaint': True, 'dolly': self.dolly }, self.objectCommon, self.moduleInpaint) if self.output_frames: for idx, frame in enumerate(tqdm(numpyResult, desc='Saving video frames')): frames_dir = os.path.join(output_path, 'frames') if not os.path.exists(frames_dir): os.makedirs(frames_dir) if pretrained_estim: frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR) cv2.imwrite(frames_dir + '/' +str(idx) + '.png', frame) # Create video output if output_path is not None: if pretrained_estim: moviepy.editor.ImageSequenceClip(sequence=[ numpyFrame[:, :, :] for numpyFrame in numpyResult + list(reversed(numpyResult))[1:] ], fps=25).write_videofile(os.path.join(output_path,'3d_kbe.mp4'), codec='mpeg4') else: moviepy.editor.ImageSequenceClip(sequence=[ numpyFrame[:, :, ::-1] for numpyFrame in numpyResult + list(reversed(numpyResult))[1:] ], fps=25).write_videofile(os.path.join(output_path,'3d_kbe.mp4'), codec='mpeg4')
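
# --- Illustrative sketch (not part of the pipeline module above) ---
# Pipeline.__call__ above expects a single image as a 1xCxHxW float tensor plus
# a zoom_settings dict with 'objectFrom' and 'objectTo' entries (their internal
# structure is defined elsewhere and is not shown here). The model paths and the
# dummy image below are assumptions for illustration only.
import numpy as np
import torch

# Build a dummy 3-channel image and shape it to (1, C, H, W) as the pipeline expects.
frame = np.random.randint(0, 255, size=(512, 512, 3), dtype=np.uint8)
tensorImage = torch.from_numpy(frame).float().permute(2, 0, 1).unsqueeze(0)
print(tensorImage.shape)  # torch.Size([1, 3, 512, 512])

# Hypothetical usage (requires trained weights and real zoom targets):
# pipeline = Pipeline(model_paths=['disparity.pt', 'refine.pt', 'inpaint.pt'])
# pipeline(tensorImage,
#          zoom_settings={'objectFrom': ..., 'objectTo': ...},
#          output_path='out/')
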
import json
import os

import toml

from modules import login, room


class Bot(object):
    """Drrr bot written in Python"""

    def __init__(self):
        self.config = toml.load('config.toml')
        self.name = self.config['Botconfig']['name']
        self.icon = self.config['Botconfig']['icon']
        self.roomId = self.config['Botconfig']['room']
        self.cookie = self.config['Botconfig']['cookie']
        self.login = login.Connect(self.name, self.icon)
        self.checkLogin()

    def checkLogin(self):
        if not os.path.isfile(self.cookie):
            self.login.login()
            self.login.save_cookie(self.cookie)

    def start(self):
        run = room.Room()
        run.room_enter(self.roomId)


if __name__ == "__main__":
    run = Bot()
    run.start()
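
# --- Illustrative sketch (not part of the bot above) ---
# Bot.__init__ reads config.toml and expects a [Botconfig] table with name,
# icon, room and cookie keys. The values below are placeholders; only the key
# names come from the class above.
import toml

example_config = {
    "Botconfig": {
        "name": "my-bot",             # display name passed to login.Connect
        "icon": "icon-id",            # avatar icon id (placeholder value)
        "room": "ROOM-ID-HERE",       # id of the room to enter
        "cookie": "drrr-cookie.dat",  # path where the login cookie is stored
    }
}

with open("config.toml.example", "w") as f:
    toml.dump(example_config, f)
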
from app import app
import unittest
import json
from config import config
from app.model import drop, init, db


class TestUsers(unittest.TestCase):

    def setUp(self):
        self.app = app
        self.app.config.from_object(config['testing'])
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.init_app(self.app)
        self.client = self.app.test_client()
        init()

        self.data = {
            "user": {
                "username": "Betty",
                "email": "betty@email.com",
                "password": "qwe123",
                "confirm_password": "qwe123"
            },
            "auth": {
                "username": "Betty",
                "password": "qwe123"
            },
            "non_user": {
                "username": "Larry",
                "password": "qwe123"
            }
        }

    def tearDown(self):
        drop()

    def test_create_user(self):
        res = self.client.post(
            '/api/v1/auth/signup',
            data=json.dumps(dict(self.data["user"])),
            content_type='application/json'
        )
        self.assertEqual(res.status_code, 201)

    def test_signin_user(self):
        res = self.client.post(
            '/api/v1/auth/signup',
            data=json.dumps(dict(self.data["user"])),
            content_type='application/json'
        )
        res = self.client.post(
            '/api/v1/auth/login',
            data=json.dumps(dict(self.data["auth"])),
            content_type='application/json'
        )
        self.assertEqual(res.status_code, 202)

    def test_empty_field(self):
        self.data["user"]["username"] = ""
        res = self.client.post(
            '/api/v1/auth/signup',
            data=json.dumps(dict(self.data["user"])),
            content_type='application/json'
        )
        self.assertEqual(res.status_code, 400)

    def test_whitespace_passed_as_input(self):
        self.data["user"]["username"] = " "
        res = self.client.post(
            '/api/v1/auth/signup',
            data=json.dumps(dict(self.data["user"])),
            content_type='application/json'
        )
        self.assertEqual(res.status_code, 400)

    def test_input_not_string(self):
        self.data["user"]["username"] = 18347
        res = self.client.post(
            '/api/v1/auth/signup',
            data=json.dumps(dict(self.data["user"])),
            content_type='application/json'
        )
        self.assertEqual(res.status_code, 400)

    def test_wrong_email_format(self):
        self.data["user"]["email"] = "betty.com"
        res = self.client.post(
            '/api/v1/auth/signup',
            data=json.dumps(dict(self.data["user"])),
            content_type='application/json'
        )
        self.assertEqual(res.status_code, 400)

    def test_confirm_password(self):
        self.data["user"]["confirm_password"] = "random"
        res = self.client.post(
            '/api/v1/auth/signup',
            data=json.dumps(dict(self.data["user"])),
            content_type='application/json'
        )
        self.assertEqual(res.status_code, 400)

    def test_signin_unregistered_user(self):
        res = self.client.post(
            '/api/v1/auth/login',
            data=json.dumps(dict(self.data["non_user"])),
            content_type='application/json'
        )
        self.assertEqual(res.status_code, 404)

    def test_same_user_registration(self):
        self.client.post(
            '/api/v1/auth/signup',
            data=json.dumps(dict(self.data["user"])),
            content_type='application/json'
        )
        res = self.client.post(
            '/api/v1/auth/signup',
            data=json.dumps(dict(self.data["user"])),
            content_type='application/json'
        )
        self.assertEqual(res.status_code, 400)


if __name__ == "__main__":
    unittest.main()
#
# PySNMP MIB module RIVERSTONE-RS94X-AGENTCAP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/RIVERSTONE-RS94X-AGENTCAP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:49:22 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueRangeConstraint")
InetAddress, = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddress")
riverstoneAgentCapabilities, = mibBuilder.importSymbols("RIVERSTONE-SMI-MIB", "riverstoneAgentCapabilities")
NotificationGroup, ModuleCompliance, AgentCapabilities = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "AgentCapabilities")
Counter32, Bits, Unsigned32, NotificationType, Integer32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, MibIdentifier, iso, ObjectIdentity, TimeTicks, IpAddress, Counter64 = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Bits", "Unsigned32", "NotificationType", "Integer32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "MibIdentifier", "iso", "ObjectIdentity", "TimeTicks", "IpAddress", "Counter64")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
rstoneRs94xAgentCapabilityMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 5567, 10, 94))
rstoneRs94xAgentCapabilityMIB.setRevisions(('2003-06-21 00:00',))
if mibBuilder.loadTexts: rstoneRs94xAgentCapabilityMIB.setLastUpdated('200306210000Z')
if mibBuilder.loadTexts: rstoneRs94xAgentCapabilityMIB.setOrganization('Riverstone Networks, Inc')
rsCapability = MibIdentifier((1, 3, 6, 1, 4, 1, 5567, 10, 94, 1))
rs94x = AgentCapabilities((1, 3, 6, 1, 4, 1, 5567, 10, 94, 1, 10))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): rs94x = rs94x.setProductRelease('9.4.x')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): rs94x = rs94x.setStatus('current')
mibBuilder.exportSymbols("RIVERSTONE-RS94X-AGENTCAP-MIB", rs94x=rs94x, rstoneRs94xAgentCapabilityMIB=rstoneRs94xAgentCapabilityMIB, PYSNMP_MODULE_ID=rstoneRs94xAgentCapabilityMIB, rsCapability=rsCapability)
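
# --- Illustrative sketch (not part of the generated module above) ---
# This pysmi-generated module is normally consumed through pysnmp's MibBuilder.
# The sketch assumes the compiled module (and its RIVERSTONE-SMI-MIB dependency)
# sits on the builder's MIB search path; the source directory is a placeholder.
from pysnmp.smi import builder

mibBuilder = builder.MibBuilder()
mibBuilder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))  # placeholder path
mibBuilder.loadModules('RIVERSTONE-RS94X-AGENTCAP-MIB')

# Pull the agent-capabilities symbol back out and inspect its OID.
(rs94x,) = mibBuilder.importSymbols('RIVERSTONE-RS94X-AGENTCAP-MIB', 'rs94x')
print(rs94x.getName())  # (1, 3, 6, 1, 4, 1, 5567, 10, 94, 1, 10)
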
import collections import enum import re from PipelineModulesLib.Util import ScopedNode import slicer import vtk from PipelineCreatorLib.PipelineBases import SinglePiecePipeline # Note: this import may show up as unused in linting, but it is needed for the exec calls to work from PipelineCreator import CallAfterAllTheseModulesLoaded, PipelineCreatorLogic class BridgeParameterWrapper: ''' The whole point of this class is to delete the bridgeParameter (which was returned from C++ land as an owning pointer) when we are done with it ''' def __init__(self, bridgeParameter): self._bridgeParameter = bridgeParameter def __del__(self): self._bridgeParameter.deleteThis() def GetValue(self): return self._bridgeParameter.GetValue() def GetUI(self): return self._bridgeParameter.GetUI() @enum.unique class Channels(enum.Enum): Input = "input" Output = "output" NoneChannel = "" def toChannelsEnum(channelString): if channelString.lower() == "input": return Channels.Input if channelString.lower() == "output": return Channels.Output if channelString == "": return Channels.NoneChannel raise Exception("Unknown channel: " + channelString) CLIParameter = collections.namedtuple("CLIParameter", "name pipelineParameterName label tag channel ptype multiple") def getCLIParameters(cliNode): parameters = [] for i in range(cliNode.GetNumberOfParameterGroups()): for j in range(cliNode.GetNumberOfParametersInGroup(i)): parameters.append(CLIParameter( name=cliNode.GetParameterName(i,j), pipelineParameterName=cliNode.GetParameterName(i,j).capitalize(), label=cliNode.GetParameterLabel(i,j), tag=cliNode.GetParameterTag(i,j), channel=toChannelsEnum(cliNode.GetParameterChannel(i,j)), ptype=cliNode.GetParameterType(i,j), multiple=cliNode.GetParameterMultiple(i,j).lower() in ('true', '1'), )) return parameters def isMRML(cliTag): return cliTag in ('geometry', 'image') def cliParameterToMRMLType(cliParameter): disclaimer = "\n This type may well be supported by CLI modules, but it may not be supported yet by CLI pipeline wrapping." \ + " Please consider adding support." 
if cliParameter.tag == "geometry": if cliParameter.ptype in ("scalar", "model"): if cliParameter.multiple: # no way to do "and cliParameter.aggregate" return "vtkMRMLModelHierarchyNode" return "vtkMRMLModelNode" else: raise Exception("Unknown geometry type: " + cliParameter.ptype + disclaimer) elif cliParameter.tag == "image": if cliParameter.ptype == "label": return "vtkMRMLLabelMapVolumeNode" elif cliParameter.ptype == "scalar": return "vtkMRMLScalarVolumeNode" else: raise Exception("Unknown image type: " + cliParameter.ptype + disclaimer) else: raise Exception("Unknown tag: " + cliParameter.tag + disclaimer) currentlyUnsupportedTags = [ 'point', 'pointfile', 'region', 'table', 'transform', 'file', 'directory', ] def checkForUnsupportedTags(cliNode, excludeArgs): for i in range(cliNode.GetNumberOfParameterGroups()): for j in range(cliNode.GetNumberOfParametersInGroup(i)): name = cliNode.GetParameterName(i,j) tag = cliNode.GetParameterTag(i,j) if name not in excludeArgs and tag in currentlyUnsupportedTags: raise Exception('PipelineCLI Attempting to use currently unsupported tag: ' + tag + '\nPlease consider adding support!') def pipelineParameterName(s): return s.capitalize() def cliToPipelineParameters(factory, cliParameters, excludeParameterNames=None): if excludeParameterNames is None: excludeParameterNames = () if isinstance(excludeParameterNames, str): excludeParameterNames = (excludeParameterNames, ) parameters = [] for param in cliParameters: if not param.name in excludeParameterNames: paramWrapper = BridgeParameterWrapper(factory.CreateParameterWrapper(param.name)) if paramWrapper is None: raise Exception("Error paramWrapper should not be None. Did you load a module into the factory?") parameters.append((param.pipelineParameterName, param.label, paramWrapper)) return parameters _invalidCharactersRe = re.compile("[^a-zA-Z1-9_]") def _fixupModuleName(name): return _invalidCharactersRe.sub('', name) #if this name changes, change parentClass in PipelineCLI class DefaultOutputCLI(SinglePiecePipeline): def __init__(self): super().__init__() def _RunImpl(self, input): self._SetInput(input) output = slicer.mrmlScene.AddNewNodeByClass(self.GetOutputType()) if output.IsA('vtkMRMLDisplayableNode'): output.CreateDefaultDisplayNodes() self._SetOutput(output) with ScopedNode(slicer.cli.runSync(self.GetModule(), parameters=self._parameters)) as cliNode: if cliNode.GetStatus() & cliNode.ErrorsMask: #error text = cliNode.GetErrorText() raise Exception("CLI execution failed for " + self.GetModule().name + ": " + text) return output #if this name changes, change parentClass in PipelineCLI class ModelHierarchyOutputCLI(SinglePiecePipeline): def __init__(self): super().__init__() self._hierarchyName = None def _nameExists(self, name): shNode = slicer.mrmlScene.GetSubjectHierarchyNode() sceneItemID = shNode.GetSceneItemID() return any([ slicer.mrmlScene.GetNodesByName(name).GetNumberOfItems() > 0, shNode.GetItemChildWithName(sceneItemID, name) != 0, ]) def _RunImpl(self, input): ''' When a CLI modules has a model hierarchy as its output, the model hierarchy is imported into the scene, which will delete the model hierarchy node and put all of its models into a subject hierarchy folder with the same name the model hierarchy had. So we create the model hierarchy with a name that is completely unique, then after the cli run we find the subject hierarchy folder with that name and grab the first model out of it. 
''' if self.GetOutputType() != 'vtkMRMLModelNode': raise Exception("Unable to run model hierarchy output CLI that doesn't have model as output") self._SetInput(input) modelHierarchy = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLModelHierarchyNode') basename = self.GetName() + "Models" self._hierarchyName = basename index = 0 while self._nameExists(self._hierarchyName): index += 1 self._hierarchyName = basename + str(index) modelHierarchy.SetName(self._hierarchyName) self._SetOutput(modelHierarchy) with ScopedNode(slicer.cli.runSync(self.GetModule(), parameters=self._parameters)) as cliNode: if cliNode.GetStatus() & cliNode.ErrorsMask: #error slicer.mrmlScene.RemoveNode(modelHierarchy) text = cliNode.GetErrorText() raise Exception("CLI execution failed for " + self.GetModule().name + ": " + text) # get output model shNode = slicer.mrmlScene.GetSubjectHierarchyNode() sceneItemID = shNode.GetSceneItemID() modelsFolderId = shNode.GetItemChildWithName(sceneItemID, self._hierarchyName) if modelsFolderId == 0: raise Exception(self.GetName() + ": Unable to find created models") children = vtk.vtkIdList() shNode.GetItemChildren(modelsFolderId, children) if children.GetNumberOfIds() == 0: raise Exception(self.GetName() + ": No models were created") outputShId = children.GetId(0) slicer.mrmlScene.RemoveNode(modelHierarchy) # this is unnecessary as of this writing, but adding for future safety shNode.SetItemParent(outputShId, shNode.GetItemParent(modelsFolderId)) shNode.RemoveItem(modelsFolderId) return shNode.GetItemDataNode(outputShId) _pipelineClassTemplate = ''' class PipelineWrapper_{fixupModuleName}({parentClass}): @staticmethod def GetName(): return '{moduleName}' @staticmethod def GetInputType(): return '{inputType}' @staticmethod def GetOutputType(): return '{outputType}' @staticmethod def GetParameters(): return PipelineWrapper_{fixupModuleName}._GetParametersImpl() @staticmethod def GetDependencies(): return PipelineWrapper_{fixupModuleName}._GetDependenciesImpl() def __init__(self): super().__init__() self._parameters = dict() def _SetInput(self, input): self.Set{inputParameter}(input) def _SetOutput(self, output): self.Set{outputParameter}(output) ''' def _deducePipelineRunArg(cliParameters, channel): options = [p for p in cliParameters if isMRML(p.tag) and p.channel == channel] if len(options) == 1: return options[0] raise Exception('Unable to deduce ' + str(channel) + ' argument: ' + str(options)) def getArgByName(cliParameters, name): return [p for p in cliParameters if p.name == name][0] def PipelineCLINow(cliModule, pipelineCreatorLogic=None, inputArgName=None, outputArgName=None, excludeArgs=None): if isinstance(cliModule, str): cliModule = slicer.app.moduleManager().module(cliModule) pipelineCreatorLogic = pipelineCreatorLogic or PipelineCreatorLogic() excludeArgs = excludeArgs or [] cliNode = slicer.cli.createNode(cliModule) checkForUnsupportedTags(cliNode, excludeArgs) cliParameters = getCLIParameters(cliNode) inputArg = getArgByName(cliParameters, inputArgName) if inputArgName else _deducePipelineRunArg(cliParameters, Channels.Input) outputArg = getArgByName(cliParameters, outputArgName) if outputArgName else _deducePipelineRunArg(cliParameters, Channels.Output) fixupModuleName = _fixupModuleName(cliModule.name) parentClass = "DefaultOutputCLI" outputType = cliParameterToMRMLType(outputArg) if outputType == "vtkMRMLModelHierarchyNode": parentClass = "ModelHierarchyOutputCLI" outputType = "vtkMRMLModelNode" classDef = _pipelineClassTemplate.format( moduleName=cliModule.name, 
fixupModuleName=fixupModuleName, inputParameter=inputArg.pipelineParameterName, outputParameter=outputArg.pipelineParameterName, inputType=cliParameterToMRMLType(inputArg), outputType=outputType, parentClass=parentClass ) # We need the class to exist in the __main__ namespace so we can pickle it exec(classDef, globals(), globals()) cliPipeline = globals()['PipelineWrapper_%s' % fixupModuleName] # need the abstract method implementation to exist at class definition, so delegating # to a method that doesn't need to exists as class definition. # Note: this is probably not necessary in Python 3.10 via abc.update_abstractmethods @staticmethod def _GetParametersImpl(): factory = slicer.qSlicerPipelineCLIModulesBridgeParameterFactory() factory.loadCLIModule(cliModule.name) return cliToPipelineParameters(factory, cliParameters, [inputArg.name, outputArg.name] + list(excludeArgs)) setattr(cliPipeline, "_GetParametersImpl", _GetParametersImpl) dependencies = list(set(list(cliModule.dependencies) + ['PipelineCreator'])) @staticmethod def _GetDependenciesImpl(): return dependencies setattr(cliPipeline, "_GetDependenciesImpl", _GetDependenciesImpl) @staticmethod def GetModule(): return cliModule setattr(cliPipeline, "GetModule", GetModule) for param in cliParameters: def makeFunc(fparam): #returning a function from inside a function was necessary to get all the parameter stuff copied correctly def setFunc(self, value): self._parameters[fparam.name] = value return setFunc setattr(cliPipeline, "Set" + param.pipelineParameterName, makeFunc(param)) pipelineCreatorLogic.registerModule(cliPipeline) # Recommended use: # # try: # from PipelineModulesLib.CLIModuleWrapping import PipelineCLI # PipelineCLI("MeshToLabelMap", inputArgName="mesh", excludeArgs=['reference']) # except ImportError: # pass def PipelineCLI(cliModuleName, pipelineCreatorLogic=None, inputArgName=None, outputArgName=None, excludeArgs=None): def f(): PipelineCLINow( slicer.app.moduleManager().module(cliModuleName), pipelineCreatorLogic, inputArgName, outputArgName, excludeArgs) CallAfterAllTheseModulesLoaded(f, ["PipelineCreator", cliModuleName])
#!/usr/bin/python from Cheetah.Template import Template from cgi import escape version = '0.1.1'; namespace = {'matplot_lib': '../matplot-' + version + '.js', 'version': version, 'renderer': ''}; templateDef=''' <!DOCTYPE html> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="author" content="Alexander Barth"> <title>matplot: $title</title> <link href="style.css" rel="stylesheet" type="text/css" /> <link href="prettify/prettify.css" type="text/css" rel="stylesheet" /> <script type="text/javascript" src="prettify/prettify.js"></script> <script type="text/javascript" src="numeric-1.2.3.js"></script> <link href="../dist/matplot.css" rel="stylesheet" type="text/css" /> <script type="text/javascript" src="../dist/matplot.js"></script> $head <script type="text/javascript"> function init() { $javascript } addEventListener('load', function (event) { prettyPrint() }, false); </script> </head> <body onload="init()"> <h1>matplot: $title</h1> <p>$description</p> <div id="plot"></div> <h2>Code:</h2> <pre class=prettyprint> $javascript_escaped </pre> </body> </html> ''' demos = [ # DEMO 1 {'title': '1D plot', 'description': 'Simple 1D line plots with different markers, anotation and legend (function plot)', 'name': 'plot', 'javascript': ''' var i,x=[],y=[],z=[]; for (i=0; i < 49; i++) { x[i] = i+3; y[i] = Math.sin(i/5); z[i] = Math.sin((i+3)/5); } // make a figure of size 700 x 500 pixels fig = new matplot.Figure($divid,700,500$renderer); ax = fig.axes(); // simple plot using indices as x-axis and values in parameter z as y-axis // The curve is draw with the given color. The label will be used later in the legend ax.plot(z,{color: 'red', label: 'foo'}); // define marker and marker size ax.plot(y,{color: 'blue', marker: 'o', markerSize: 5, label: 'bar'}); // make a dotted line with linespec ax.plot(x,y,{color: 'green', marker: 's', linewidth: 2, label: 'baz'}); // add legend ax.legend(); // add annotation at the location x=20, y=0.6 and z=0 with the given text ax.annotation(20,0.6,0,'annotation'); // draw everything fig.draw(); '''}, ############################# {'title': 'pcolor', 'description': '2D pseudo color plot (function pcolor)', 'name': 'peaks', 'javascript': ''' // load the peaks sample data peaks = matplot.peaks(); // make a figure of size 700 x 500 pixels fig = new matplot.Figure($divid,700,500$renderer); // add axis to the figure ax = fig.axes(); // pseudo color plot ax.pcolor(peaks.x,peaks.y,peaks.z); // add color-bar ax.colorbar(); // draw everything fig.draw(); '''}, ############################# {'title': 'patch', 'description': 'Draw a polygone (function patch)', 'name': 'patch', 'javascript': ''' // make a figure of size 700 x 500 pixels fig = new matplot.Figure($divid,700,500$renderer); // add axis to the figure ax = fig.axes(); // blue triangle ax.patch([0,2,1],[0,0,1],{color: 'blue'}); // draw everything fig.draw(); '''}, ############################# {'title': 'coastline', 'description': 'Example showing the coastline (function patch)', 'name': 'countries', 'head': '<script type="text/javascript" src="countries.js"></script>', 'javascript': ''' // the global variable 'countries' is loaded from the file countries.js var i,j, coord; // make a figure of size 700 x 500 pixels fig = new matplot.Figure($divid,700,500$renderer); // add axis to the figure ax = fig.axes(); // loop over all countries and polygones for (i=0; i < countries.length; i++) { for (j=0; j < countries[i].coordinates.length; j++) { // countries[i].coordinates[j] is a list 
of [longitude,latitude] coord = numeric.transpose(countries[i].coordinates[j]); ax.patch(coord[0],coord[1],{color: 'blue'}); } } // draw everything fig.draw(); '''}, ############################# {'title': 'pcolor on non-rectangular grid', 'description': '2D pseudo color plot on non-rectangular grid (function pcolor)', 'name': 'pcolor', 'javascript': ''' var i,j,x=[],y=[],z=[], r, theta; // generate some data to plot for (i=0; i < 100; i++) { x[i] = []; y[i] = []; z[i] = []; for (j=0; j < 100; j++) { r = 20 + i; theta = 2*Math.PI * j/99; x[i][j] = r*Math.cos(theta); y[i][j] = r*Math.sin(theta); z[i][j] = Math.cos(2*theta); } } fig = new matplot.Figure($divid,700,500$renderer); ax = fig.axes(); ax.pcolor(x,y,z); ax.colorbar(); ax.DataAspectRatio([1,1,1]); fig.draw(); '''}, ############################# {'title': 'quiver', 'description': 'Arrow plot (function quiver)', 'name': 'quiver', 'javascript': ''' var i,j,x=[],y=[],u=[], v=[]; // generate some data to plot for (i=0; i < 30; i++) { x[i] = []; y[i] = []; u[i] = []; v[i] = []; for (j=0; j < 30; j++) { x[i][j] = i-15 y[i][j] = j-15 u[i][j] = -y[i][j] v[i][j] = x[i][j] } } fig = new matplot.Figure($divid,700,500$renderer); ax = fig.axes(); //ax.quiver(x,y,u,v,{scale: 0.1,color: 'green'}); ax.quiver(x,y,u,v,{scale: 0.1,color: 'norm'}); ax.DataAspectRatio([1,1,1]); ax.colorbar(); fig.draw(); '''}, ############################# {'title': 'overlay', 'description': 'Overlay of two plots', 'name': 'overlay', 'javascript': ''' var i,j, u=[], v=[], xq = [], yq = []; // load the peaks sample data peaks = matplot.peaks(); for (i=0; i < peaks.x.length-2; i++) { u[i] = []; v[i] = []; xq[i] = []; yq[i] = []; for (j=0; j < peaks.x[0].length-2; j++) { xq[i][j] = peaks.x[i+1][j+1] yq[i][j] = peaks.y[i+1][j+1] // compute geostrophic currents u[i][j] = -(peaks.z[i+1][j+2] - peaks.z[i+1][j]); v[i][j] = peaks.z[i+2][j+1] - peaks.z[i][j+1]; } } // make a figure of size 700 x 500 pixels //fig = new matplot.Figure($divid,700,500,{renderer: matplot.RasterCanvas}); fig = new matplot.Figure($divid,700,500); // add axis to the figure ax = fig.axes(); // pseudo color plot ax.pcolor(peaks.x,peaks.y,peaks.z); // arrow plot ax.quiver(xq,yq,u,v,{scale: 0.1}); // add color-bar ax.colorbar(); // draw everything fig.draw(); '''}, ############################# {'title': 'scatter', 'description': '2D scatter plot (function scatter)', 'name': 'scatter', 'javascript': ''' var i,x=[],y=[],c=[]; // generate some data to plot // generate some data to plot for (i=0; i < 100; i++) { r = 20 + 5*i; theta = 2*Math.PI * i/25; x[i] = r*Math.cos(theta); y[i] = r*Math.sin(theta); c[i] = r; } fig = new matplot.Figure($divid,700,500$renderer); ax = fig.axes(); ax.scatter(x,y,5,c); ax.DataAspectRatio([1,1,1]); ax.colorbar(); fig.draw(); '''}, ############################# {'title': 'Surface plot', 'description': '3D pseudo color plot (function surf)', 'name': 'surf', 'javascript': ''' // load the peaks sample data peaks = matplot.peaks(); // make a figure of size 700 x 500 pixels fig = new matplot.Figure($divid,700,500); // add axis to the figure ax = fig.axes(); // pseudo color plot ax.surf(peaks.x,peaks.y,peaks.z); //ax.CameraPosition([27.394, 35.701, 25.981]); // add color-bar ax.colorbar(); // draw everything fig.draw(); '''}, ############################# {'title': '3D scatter', 'description': '3D scatter plot (function scatter3)', 'name': 'scatter3', 'javascript': ''' var i, j, k, x = [],y = [], z = [], c = [], r = []; // generate some data to plot for (i=0; i < 4; i++) { x[i] = []; 
y[i] = []; z[i] = []; c[i] = []; for (j=0; j < 20; j++) { x[i][j] = []; y[i][j] = []; z[i][j] = []; c[i][j] = []; for (k=0; k < 10; k++) { r = 20 + 5*i; phi = 2*Math.PI * j/19; theta = Math.PI * k/9; phi = 2*Math.PI * j/9; theta = Math.PI * k/4; x[i][j][k] = r * Math.sin(theta) * Math.cos(phi); y[i][j][k] = r * Math.sin(theta) * Math.sin(phi); z[i][j][k] = r * Math.cos(theta); c[i][j][k] = r; } } } fig = new matplot.Figure($divid,700,500); ax = fig.axes(); ax.scatter3(x,y,z,5,c); ax.DataAspectRatio([1,1,1]); ax.colorbar(); ax.CameraPosition([100, 100, 40]); fig.draw(); '''}, ############################# # {'title': 'pcolor', # 'description': '2D pseudo color plot', # 'name': 'pcolor', # 'javascript': ''' # '''}, ] # template for index file index = ''' <!DOCTYPE html> <html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <meta name="keywords" content="visualization, demo, JavaScript, web-app, education"> <meta name="author" content="Alexander Barth"> <title>matplot: demo</title> <link href="style.css" rel="stylesheet" type="text/css" /> </head> <h1>matplot</h1> <ul> $example </ul> </body> </html> ''' def makeDemos(demos): example = [] onlySVG = True for demo in demos: if 'head' not in demo: demo['head'] = '' if onlySVG: example += ['<li><a href="demo_' + demo['name'] + '.html">'+ demo['description'] + "</a>"] else: example += ['<li>' + demo['description']] for renderer in ['','matplot.RasterCanvas']: print 'Making example:',demo['title'] if renderer == 'matplot.RasterCanvas': rtype = 'canvas' filename = 'demo_' + demo['name'] + '_canvas' + '.html' namespace['renderer'] = ', {renderer: ' + renderer + '}' else: rtype = 'svg' filename = 'demo_' + demo['name'] + '.html' namespace['renderer'] = '' example += [' <a href="',filename,'">',rtype,'</a> '] javascript = demo['javascript'].replace('$renderer',namespace['renderer']).replace('$divid','"plot"') f = open(filename,'w') f.write(str(Template(templateDef, searchList=[ {'javascript': javascript, 'javascript_escaped': escape(javascript) }, demo,namespace]))) f.close(); example += ['</li>'] f = open('index.html','w') f.write(str(Template(index, searchList={'example': ''.join(example)}))) f.close(); if __name__ == '__main__': makeDemos(demos)
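# Minimal sketch of the Cheetah substitution the generator above relies on:
# placeholders such as $title are resolved from the dictionaries passed in
# searchList, searched left to right (the values here are illustrative).

from Cheetah.Template import Template

page = '<h1>matplot: $title</h1><p>$description</p>'
html = str(Template(page, searchList=[{'title': '1D plot',
                                       'description': 'simple line plot'}]))
# html == '<h1>matplot: 1D plot</h1><p>simple line plot</p>'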
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 26 23:15:48 2016

@author: oacom
"""
import numpy as np
from terrain import genTerrain, interpTerrain, calcNormals, loadTerrain
from initCameras import generateCameras
from plotting import plotAll
from checkPoints import checkPoints
from sortAngles import sortAngles
from greedySort import greedySort
import time
from loadCameras import loadCameras

start = time.perf_counter()
np.random.seed(0)
# Create random terrain
nn,ee,dd = genTerrain(100,100,50,5)
# OR Load terrain from file
#nn,ee,dd = loadTerrain('terrain/output.mean.tif')
# Smooth terrain
nn,ee,dd = interpTerrain(nn,ee,dd,2)
# Terrain normals
norm_n,norm_e,norm_d = calcNormals(nn,ee,dd)
# Initialize cameras from normals
cn,ce,cd,cor_n,cor_e,cor_d = generateCameras(nn, ee, dd, norm_n, norm_e, norm_d, 50)
# OR Load cameras from image metadata
#directory_images = 'E:/21_Sept_2015_WAMI_Flight_1/2015.09.21_15.39.54'
#directory_metadata = 'E:/Bags/20150921_Flight1'
#cn,ce,cd,cor_n,cor_e,cor_d = loadCameras(directory_images,directory_metadata)
# Compute camera/point visibility matrix
cams_cors = np.c_[cn,ce,cd,cor_n,cor_e,cor_d]
P = np.c_[nn.flatten(),ee.flatten(),dd.flatten()].T
#start_v = time.perf_counter()
# rows are cameras, cols are points
visibility = np.apply_along_axis(checkPoints,1,cams_cors,P=P,f=200,n=5,alphax=np.radians(30),alphay=np.radians(30))
#print('Visibility Time: ' + str(time.perf_counter()-start_v))
#fig = plt.figure()
#ax = plt.gca()
#ax.imshow(visibility,cmap='Greys',interpolation='nearest')
# What does this covariance/correspondence plot mean?
# Sort visible points by angle
vis_cors = np.c_[visibility,cor_n,cor_e,cor_d]  # Combine all camera info needed
visibilityAngle = np.apply_along_axis(sortAngles,1,vis_cors,norm_n=norm_n,norm_e=norm_e,norm_d=norm_d)

# Camera selection
c_min, numCams, camIdx = greedySort(visibilityAngle,0.90)
# Plotting
time_noplot = time.perf_counter()-start
visStack = np.sum(np.r_[c_min[:,0:100],c_min[:,100:200],c_min[:,200:300]],axis=0)
plotAll(1,1,1,1,1,nn,ee,dd,cn,ce,cd,norm_n,norm_e,norm_d,cor_n,cor_e,cor_d,camIdx,visStack)
# Output
print('Terrain Points: ' + str(nn.size))
print('Starting Cameras: ' + str(cn.size))
print('Ending Cameras: ' + str(numCams))
print('Terrain Points covered by 3 Cameras: ' + str(np.sum(visStack>=3)))
print('Terrain Points covered by 2 Cameras: ' + str(np.sum(visStack==2)))
print('Terrain Points covered by 1 Cameras: ' + str(np.sum(visStack==1)))
print('Terrain Points covered by 0 Cameras: ' + str(np.sum(visStack==0)))
print('Solution Time: ' + str(time_noplot))
print('Solution Time with plotting: ' + str(time.perf_counter()-start))
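# Illustrative sketch only -- not necessarily the exact logic inside
# greedySort(): given a boolean visibility matrix with one row per camera and
# one column per terrain point, a greedy cover repeatedly keeps the camera
# that sees the most still-uncovered points until the requested coverage
# fraction is reached.

import numpy as np


def greedy_cover(visibility, coverage=0.90):
    n_cams, n_points = visibility.shape
    uncovered = np.ones(n_points, dtype=bool)
    chosen = []
    while uncovered.mean() > (1.0 - coverage):
        gains = (visibility & uncovered).sum(axis=1)
        best = int(np.argmax(gains))
        if gains[best] == 0:
            break  # remaining points are not visible from any camera
        chosen.append(best)
        uncovered &= ~visibility[best]
    return chosen


if __name__ == '__main__':
    vis = np.array([[1, 1, 0, 0],
                    [0, 1, 1, 0],
                    [0, 0, 1, 1]], dtype=bool)
    print(greedy_cover(vis, coverage=1.0))  # -> [0, 2]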
from string import ascii_lowercase, ascii_uppercase, digits def main(): s = input() lowercase = ''.join(sorted([i for i in s if i in ascii_lowercase])) uppercase = ''.join(sorted([i for i in s if i in ascii_uppercase])) odd = ''.join(sorted([i for i in s if i in digits and int(i) % 2 == 1])) even = ''.join(sorted([i for i in s if i in digits and int(i) % 2 == 0])) print(lowercase + uppercase + odd + even) if __name__ == "__main__": main()
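# Equivalent single-pass sketch: assuming the input contains only letters and
# digits (as the filters above imply), the same ordering can be expressed as
# one sorted() call with a composite key -- category first (lowercase,
# uppercase, odd digits, even digits), then the character itself.


def sort_chars(s: str) -> str:
    def key(c):
        if c.islower():
            return (0, c)
        if c.isupper():
            return (1, c)
        return (2 if int(c) % 2 == 1 else 3, c)

    return ''.join(sorted(s, key=key))


assert sort_chars('Sorting1234') == 'ginortS1324'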
"""Module defining encoders.""" from opennmt.encoders.encoder import SequentialEncoder, ParallelEncoder from opennmt.encoders.rnn_encoder import UnidirectionalRNNEncoder from opennmt.encoders.rnn_encoder import BidirectionalRNNEncoder from opennmt.encoders.rnn_encoder import RNMTPlusEncoder from opennmt.encoders.rnn_encoder import GoogleRNNEncoder from opennmt.encoders.rnn_encoder import PyramidalRNNEncoder from opennmt.encoders.conv_encoder import ConvEncoder from opennmt.encoders.self_attention_encoder import SelfAttentionEncoder from opennmt.encoders.mean_encoder import MeanEncoder
# Copyright Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Requires Python 2.6+ and Openssl 1.0+ # import xml from azurelinuxagent.common.protocol.goal_state import GoalState, RemoteAccess # pylint: disable=unused-import from tests.tools import AgentTestCase, load_data, patch, Mock # pylint: disable=unused-import from tests.protocol import mockwiredata from tests.protocol.mocks import mock_wire_protocol class TestRemoteAccess(AgentTestCase): def test_parse_remote_access(self): data_str = load_data('wire/remote_access_single_account.xml') remote_access = RemoteAccess(data_str) self.assertNotEqual(None, remote_access) self.assertEqual("1", remote_access.incarnation) self.assertEqual(1, len(remote_access.user_list.users), "User count does not match.") self.assertEqual("testAccount", remote_access.user_list.users[0].name, "Account name does not match") self.assertEqual("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") self.assertEqual("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") def test_goal_state_with_no_remote_access(self): with mock_wire_protocol(mockwiredata.DATA_FILE) as protocol: self.assertIsNone(protocol.client.get_remote_access()) def test_parse_two_remote_access_accounts(self): data_str = load_data('wire/remote_access_two_accounts.xml') remote_access = RemoteAccess(data_str) self.assertNotEqual(None, remote_access) self.assertEqual("1", remote_access.incarnation) self.assertEqual(2, len(remote_access.user_list.users), "User count does not match.") self.assertEqual("testAccount1", remote_access.user_list.users[0].name, "Account name does not match") self.assertEqual("encryptedPasswordString", remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") self.assertEqual("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") self.assertEqual("testAccount2", remote_access.user_list.users[1].name, "Account name does not match") self.assertEqual("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.") self.assertEqual("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.") def test_parse_ten_remote_access_accounts(self): data_str = load_data('wire/remote_access_10_accounts.xml') remote_access = RemoteAccess(data_str) self.assertNotEqual(None, remote_access) self.assertEqual(10, len(remote_access.user_list.users), "User count does not match.") def test_parse_duplicate_remote_access_accounts(self): data_str = load_data('wire/remote_access_duplicate_accounts.xml') remote_access = RemoteAccess(data_str) self.assertNotEqual(None, remote_access) self.assertEqual(2, len(remote_access.user_list.users), "User count does not match.") self.assertEqual("testAccount", remote_access.user_list.users[0].name, "Account name does not match") self.assertEqual("encryptedPasswordString", 
remote_access.user_list.users[0].encrypted_password, "Encrypted password does not match.") self.assertEqual("2019-01-01", remote_access.user_list.users[0].expiration, "Expiration does not match.") self.assertEqual("testAccount", remote_access.user_list.users[1].name, "Account name does not match") self.assertEqual("encryptedPasswordString", remote_access.user_list.users[1].encrypted_password, "Encrypted password does not match.") self.assertEqual("2019-01-01", remote_access.user_list.users[1].expiration, "Expiration does not match.") def test_parse_zero_remote_access_accounts(self): data_str = load_data('wire/remote_access_no_accounts.xml') remote_access = RemoteAccess(data_str) self.assertNotEqual(None, remote_access) self.assertEqual(0, len(remote_access.user_list.users), "User count does not match.") def test_update_remote_access_conf_remote_access(self): with mock_wire_protocol(mockwiredata.DATA_FILE_REMOTE_ACCESS) as protocol: self.assertIsNotNone(protocol.client.get_remote_access()) self.assertEqual(1, len(protocol.client.get_remote_access().user_list.users)) self.assertEqual('testAccount', protocol.client.get_remote_access().user_list.users[0].name) self.assertEqual('encryptedPasswordString', protocol.client.get_remote_access().user_list.users[0].encrypted_password) def test_parse_bad_remote_access_data(self): data = "foobar" self.assertRaises(xml.parsers.expat.ExpatError, RemoteAccess, data)
"""Utility that collects interpreted benchmark timings for a commit. Run "python3 -m reporting.collect_baseline --help" for more information. """ from typing import Tuple from datetime import datetime import argparse from reporting.common import get_csv_path from reporting.gitutil import get_current_commit from reporting.collect import write_csv_line, run_bench def parse_args() -> Tuple[str, str]: parser = argparse.ArgumentParser( description="""Run an interpreted benchmark, and append result to the file <data_repo>/data/<benchmark>-cpython.csv.""") parser.add_argument( "benchmark", help="""benchmark name, such as 'richards' (use 'runbench.py --list' to show valid values)""") parser.add_argument( "data_repo", help="target data repository where output will be written (this will be modified!)") args = parser.parse_args() return args.benchmark, args.data_repo def main() -> None: benchmark, data_repo = parse_args() now = datetime.utcnow() benchmark_commit = get_current_commit(".") runtime, stddev = run_bench(benchmark, None, compiled=False) fnam = get_csv_path(data_repo, benchmark, cpython=True) write_csv_line(fnam, benchmark, now, runtime, stddev, "", benchmark_commit) if __name__ == "__main__": main()
# --------------------------------------------------------------- # __init__.py # Set-up time: 2020/5/14 下午4:09 # Copyright (c) 2020 ICT # Licensed under The MIT License [see LICENSE for details] # Written by Kenneth-Wong (Wenbin-Wang) @ VIPL.ICT # Contact: wenbin.wang@vipl.ict.ac.cn [OR] nkwangwenbin@gmail.com # --------------------------------------------------------------- from .runner import *
#! /usr/bin/env python # -*- coding: utf-8 -*- # The MIT License (MIT) # # Copyright (c) 2015 Bartosz Janda # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from .. import helpers from ..common import SummaryBase import UIColor class UIDeviceWhiteColorSyntheticProvider(UIColor.UIColorSyntheticProvider): """ Class representing UIDeviceWhiteColor. """ def __init__(self, value_obj, internal_dict): super(UIDeviceWhiteColorSyntheticProvider, self).__init__(value_obj, internal_dict) self.type_name = "UIDeviceWhiteColor" self.register_child_value("white_component", ivar_name="whiteComponent", primitive_value_function=SummaryBase.get_float_value, summary_function=self.get_white_component_summary) self.register_child_value("alpha_component", ivar_name="alphaComponent", primitive_value_function=SummaryBase.get_float_value, summary_function=self.get_alpha_component_summary) self.synthetic_children = ["white_component", "alpha_component", "system_color_name"] @staticmethod def get_white_component_summary(value): return "white={}".format(SummaryBase.formatted_float(value)) @staticmethod def get_alpha_component_summary(value): if value == 1: return None return "alpha={}".format(SummaryBase.formatted_float(value)) def summaries_parts(self): return [self.white_component_summary, self.alpha_component_summary, self.system_color_name_summary] def summary_provider(value_obj, internal_dict): return helpers.generic_summary_provider(value_obj, internal_dict, UIDeviceWhiteColorSyntheticProvider)
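# Sketch of how a provider like this is typically wired up from an LLDB init
# script. The module path "lldb_additions.UIKit.UIDeviceWhiteColor" is a
# hypothetical placeholder -- use whatever dotted path this file is actually
# importable under in your LLDB Python environment.

def __lldb_init_module(debugger, internal_dict):
    debugger.HandleCommand(
        'type summary add -F '
        'lldb_additions.UIKit.UIDeviceWhiteColor.summary_provider '
        'UIDeviceWhiteColor')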
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). # You may not use this file except in compliance with the License. # A copy of the License is located at # # http://www.apache.org/licenses/LICENSE-2.0 # # or in the "license" file accompanying this file. This file is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either # express or implied. See the License for the specific language governing # permissions and limitations under the License. # Third-party imports import numpy as np import pandas as pd import pytest # First-party imports from gluonts.dataset.field_names import FieldName from gluonts.dataset.common import ListDataset from gluonts.transform import AddAggregateLags expected_lags_calendar = { "prediction_length_2": { "train": np.array( [ [0, 0, 0, 0, 0, 1, 1, 2, 2, 3], [0, 0, 0, 0, 0, 0, 0, 1, 1, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ), "test": np.array( [ [0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4.5], [0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ), }, "prediction_length_1": { "train": np.array( [ [0, 0, 0, 1, 1, 2, 2, 3, 3, 4.5], [0, 0, 0, 0, 0, 1, 1, 2, 2, 3], [0, 0, 0, 0, 0, 0, 0, 1, 1, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ), "test": np.array( [ [0, 0, 0, 1, 1, 2, 2, 3, 3, 4.5, 4.5], [0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3], [0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ), }, } expected_lags_rolling = { "prediction_length_2": { "train": np.array( [ [0, 0, 0, 1, 1, 1.5, 2, 2.5, 3, 3.5], [0, 0, 0, 0, 0, 1, 1, 1.5, 2, 2.5], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1.5], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ), "test": np.array( [ [0, 0, 0, 1, 1, 1.5, 2, 2.5, 3, 3.5, 4.5, 5.5], [0, 0, 0, 0, 0, 1, 1, 1.5, 2, 2.5, 3, 3.5], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1.5, 2, 2.5], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1.5], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ), }, "prediction_length_1": { "train": np.array( [ [0, 0, 0, 1, 1, 1.5, 2, 2.5, 3, 3.5], [0, 0, 0, 0, 0, 1, 1, 1.5, 2, 2.5], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1.5], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ), "test": np.array( [ [0, 0, 0, 1, 1, 1.5, 2, 2.5, 3, 3.5, 4.5], [0, 0, 0, 0, 0, 1, 1, 1.5, 2, 2.5, 3], [0, 0, 0, 0, 0, 0, 0, 1, 1, 1.5, 2], [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] ), }, } valid_lags_calendar = { "prediction_length_2": [2, 3, 4, 6], "prediction_length_1": [1, 2, 3, 4, 6], } valid_lags_rolling = { "prediction_length_2": [1, 2, 3, 4, 6], "prediction_length_1": [1, 2, 3, 4, 6], } @pytest.mark.parametrize("pred_length", [2, 1]) @pytest.mark.parametrize("rolling_lags", [True, False]) def test_agg_lags(pred_length, rolling_lags): # create dummy dataset target = np.array([1, 1, 1, 2, 2, 3, 3, 4, 5, 6]) start = pd.Timestamp("01-01-2019 01:00:00", freq="1H") freq = "1H" ds = ListDataset( [{FieldName.TARGET: target, FieldName.START: start}], freq=freq ) # 2H aggregate lags lags_2H = [1, 2, 3, 4, 6] add_agg_lags = AddAggregateLags( target_field=FieldName.TARGET, output_field="lags_2H", pred_length=pred_length, base_freq=freq, agg_freq="2H", agg_lags=lags_2H, rolling_agg=rolling_lags, ) assert add_agg_lags.ratio == 2 train_entry = next(add_agg_lags(iter(ds), is_train=True)) test_entry = next(add_agg_lags(iter(ds), is_train=False)) if rolling_lags: assert ( 
add_agg_lags.valid_lags == valid_lags_rolling[f"prediction_length_{pred_length}"] ) assert np.allclose( train_entry["lags_2H"], expected_lags_rolling[f"prediction_length_{pred_length}"]["train"], ) assert np.allclose( test_entry["lags_2H"], expected_lags_rolling[f"prediction_length_{pred_length}"]["test"], ) else: assert ( add_agg_lags.valid_lags == valid_lags_calendar[f"prediction_length_{pred_length}"] ) assert np.allclose( train_entry["lags_2H"], expected_lags_calendar[f"prediction_length_{pred_length}"][ "train" ], ) assert np.allclose( test_entry["lags_2H"], expected_lags_calendar[f"prediction_length_{pred_length}"]["test"], )
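# Where the expected values above come from (illustrative sketch only; it does
# not reproduce the transform's lag indexing or zero padding): with a 1H base
# frequency and a 2H aggregation frequency (ratio == 2), the dummy target
# aggregates to 2H means as follows.

import numpy as np

target = np.array([1, 1, 1, 2, 2, 3, 3, 4, 5, 6], dtype=float)
ratio = 2  # agg_freq / base_freq

# Rolling aggregation: mean over a sliding window of `ratio` base steps.
rolling = np.convolve(target, np.ones(ratio) / ratio, mode="valid")
# -> [1., 1., 1.5, 2., 2.5, 3., 3.5, 4.5, 5.5]

# Calendar aggregation: the series starts at 01:00, so the 2H buckets align to
# even hours and the first (and last) bucket holds a single observation.
padded = np.concatenate(([np.nan], target, [np.nan]))
calendar = np.nanmean(padded.reshape(-1, ratio), axis=1)
# -> [1., 1., 2., 3., 4.5, 6.]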
OSNAME = "MACOS" NUMCORES = 4 BUILDCOMMAND = ["make", "-j" + str(NUMCORES)] CLEANCOMMAND = ["make", "clean"] FBSDIR = "/Users/mherron/Projects/FEBioStudio/" FBSBUILDDIR = FBSDIR + "build/" FBSPATH = FBSBUILDDIR + "bin/FEBioStudio.app/Contents/MacOS/FEBioStudio" FBSREMOTEDIR = "macOS/stage/FEBioStudio.app/Contents/MacOS" FEBIODIR = "/Users/mherron/Projects/FEBio/" FEBIOBUILDDIR = FEBIODIR + "cbuild/Release/" FEBIOPATH = FEBIOBUILDDIR + "bin/febio3" FEBIOUPLOADPATH = FBSBUILDDIR + "bin/FEBioStudio.app/Contents/MacOS/febio3" FEBIOREMOTEDIR = "macOS/stage/FEBioStudio.app/Contents/MacOS" CHEMDIR = "/Users/mherron/Projects/Plugins/FEBioChem/" CHEMBUILDDIR = CHEMDIR + "cbuild/" CHEMPATH = CHEMBUILDDIR + "lib/libFEBioChem.dylib" CHEMREMOTEDIR = "macOS/stage/FEBioStudio.app/Contents/Frameworks" HEATDIR = "/Users/mherron/Projects/Plugins/FEBioHeat/" HEATBUILDDIR = HEATDIR + "cbuild/" HEATPATH = HEATBUILDDIR + "lib/libFEBioHeat.dylib" HEATREMOTEDIR = "macOS/stage/FEBioStudio.app/Contents/Frameworks" TESTDIR = "/Users/mherron/Projects/TestSuite/" VERIFYDIR = TESTDIR + "Verify3/" LOGDIR = TESTDIR + "Logs/" WORKINGDIR = "/Users/mherron/scratch/FEBioTests/" AUTOMATIONDIR = "/Users/mherron/Projects/automation/" GOLDSTANDARDS = "macOSGoldStandards.py" RELEASELOG = AUTOMATIONDIR + "release.log" REMOTESCRIPT = "/root/update2/FEBioStudioDev/makeDevReleaseMacOS.sh" pluginPaths = {'heat': HEATPATH, 'chem': CHEMPATH} localExemptTests = []
import types import json import schematics from schematics.models import Model from schematics.types import IntType, StringType from schematics.types.compound import ModelType from pyco.helpers.schema import BytesType, TupleType, LazyDictType from pyco import errors, helpers class Code(Model): argcount = IntType(required=True, default=0) kwonlyargcount = IntType(default=0) cellvars = TupleType(required=True) code = BytesType(required=True) consts = TupleType(required=True) filename = StringType(required=True) firstlineno = IntType(required=True) flags = IntType(required=True) freevars = TupleType(required=True) lnotab = BytesType(required=True) name = StringType(required=True, default='<string>') names = TupleType(required=True) nlocals = IntType(required=True) stacksize = IntType(required=True) varnames = TupleType(required=True) def __init__(self, *args, **kwargs): super(Code, self).__init__(raw_data=kwargs) try: self.validate() except schematics.exceptions.ModelValidationError as err: raise errors.ValidationError(err.messages) @classmethod def from_json(cls, data): return cls.from_dict(json.loads(data)) @classmethod def from_code(cls, co): data = helpers.code.code_props(co, preffix=False) return cls(**data) @classmethod def from_fn(cls, fn): return cls.from_code(fn.__code__) def as_code(self): return types.CodeType( self.argcount, self.kwonlyargcount, self.nlocals, self.stacksize, self.flags, self.code, self.consts, self.names, self.varnames, self.filename, self.name, self.firstlineno, self.lnotab, self.freevars, self.cellvars ) def as_json(self): return json.dumps(helpers.code.pre_dump(self.as_dict())) def as_dict(self, prepare=False): if not prepare: return {k:v for (k, v) in self.items()} return helpers.code.pre_dump({k:v for (k, v) in self.items()})
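# Usage sketch, assuming the Code class above is in scope and that the
# interpreter's CodeType constructor matches the 15-argument order used in
# as_code() (CPython 3.x before 3.8, which added posonlyargcount):

import types


def add(a, b):
    return a + b


model = Code.from_fn(add)                                 # capture add.__code__ as a model
rebuilt = types.FunctionType(model.as_code(), globals())  # back to a callable
assert rebuilt(2, 3) == 5                                 # behaves like the original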
from dsslite import * db = Database() w = Worker(db) sim = Simulation(w, sim_speed=0) sim.run()
# coding: utf-8 import pprint import re import six class Encryption: """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ sensitive_list = [] openapi_types = { 'hls_encrypt': 'HlsEncrypt', 'multidrm': 'Multidrm', 'preview_duration': 'int' } attribute_map = { 'hls_encrypt': 'hls_encrypt', 'multidrm': 'multidrm', 'preview_duration': 'preview_duration' } def __init__(self, hls_encrypt=None, multidrm=None, preview_duration=0): """Encryption - a model defined in huaweicloud sdk""" self._hls_encrypt = None self._multidrm = None self._preview_duration = None self.discriminator = None if hls_encrypt is not None: self.hls_encrypt = hls_encrypt if multidrm is not None: self.multidrm = multidrm if preview_duration is not None: self.preview_duration = preview_duration @property def hls_encrypt(self): """Gets the hls_encrypt of this Encryption. :return: The hls_encrypt of this Encryption. :rtype: HlsEncrypt """ return self._hls_encrypt @hls_encrypt.setter def hls_encrypt(self, hls_encrypt): """Sets the hls_encrypt of this Encryption. :param hls_encrypt: The hls_encrypt of this Encryption. :type: HlsEncrypt """ self._hls_encrypt = hls_encrypt @property def multidrm(self): """Gets the multidrm of this Encryption. :return: The multidrm of this Encryption. :rtype: Multidrm """ return self._multidrm @multidrm.setter def multidrm(self, multidrm): """Sets the multidrm of this Encryption. :param multidrm: The multidrm of this Encryption. :type: Multidrm """ self._multidrm = multidrm @property def preview_duration(self): """Gets the preview_duration of this Encryption. 加密预览时长, 单位秒(S), 0 - preview_duration之间的内容不加密 :return: The preview_duration of this Encryption. :rtype: int """ return self._preview_duration @preview_duration.setter def preview_duration(self, preview_duration): """Sets the preview_duration of this Encryption. 加密预览时长, 单位秒(S), 0 - preview_duration之间的内容不加密 :param preview_duration: The preview_duration of this Encryption. :type: int """ self._preview_duration = preview_duration def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: if attr in self.sensitive_list: result[attr] = "****" else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, Encryption): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
from jsonschema import validate


def validate_status_code(response, expected_status):
    """Please specify the parameters in the following order => (response, expected_status)"""
    assert response.status_code == int(expected_status), f"Status code validation failed. Expected {expected_status}, Actual {response.status_code}"


def validate_strings(expected_message, actual_message):
    """Please specify the parameters in the following order => (expected_message, actual_message)"""
    assert expected_message == actual_message, f"String validation failed. Expected {expected_message}, Actual {actual_message}"


def validate_schema(response, expected_schema):
    """Please specify the parameters in the following order => (response, expected_schema)"""
    # jsonschema.validate raises a ValidationError itself when the instance does not match
    validate(instance=response, schema=expected_schema)


def value_present(response, expected_value):
    """Please specify the parameters in the following order => (response, expected_value)"""
    if type(response) != dict:
        response = response.json()
    assert expected_value in response, f"Attribute {expected_value} is not available"


def is_json(response):
    """This function will validate whether the header has the content-type as json"""
    assert 'application/json' in response.headers.get('content-type'), "The response is not json value"
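# Usage sketch: how these helpers might be combined in a test. The URL and
# schema below are illustrative only.

import requests


def test_get_user():
    response = requests.get("https://api.example.com/users/1")

    validate_status_code(response, 200)
    validate_schema(
        response.json(),
        {"type": "object", "required": ["id", "name"]},
    )
    value_present(response, "name")
    is_json(response)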
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' # File: /playstream3.py # Created Date: Monday April 29th 2019 # ----- # Last Modified: Sunday May 12th 2019 7:22:55 pm ''' from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse import hashlib import json import logging import re import signal import subprocess import sys import time import urllib.request from typing import Dict, Tuple class Client(object): def __init__(self, server_host, server_port, multi_players=False): self.server_host = server_host self.server_port = server_port self.engine_version = "" self.engine_version_code = 0 self.multi_players = multi_players self.poll_time = 2 self.running = False def __enter__(self): def stop(sig_num, stack_frame): self.running = False self.running = True logging.info("Client starts.") signal.signal(signal.SIGINT, stop) return self def __exit__(self, exc_type=None, exc_value=None, traceback=None): if any([exc_type, exc_value, traceback]): logging.error(repr(exc_type)) logging.error(repr(exc_value)) logging.error(repr(traceback)) logging.info("Client exit.") return True def _api_request(self, url: str) -> Dict: """Send request to acestream server and return response json dict Args: url: api url Returns: dict: response json data """ response = urllib.request.urlopen(url) return json.loads(response.read().decode()) def _check_server_availability(self) -> bool: """Check server availability before start streaming Returns: bool: wether server is avaliable """ url = "http://{}:{}/webui/api/service?method=get_version&format=jsonp&callback=".format( self.server_host, self.server_port) try: response_dic = self._api_request(url) except: logging.exception("Check server availability failed!") return False else: if response_dic.get("error"): return False self.engine_version = response_dic.get("result").get("version") self.engine_version_code = int( response_dic.get("result").get("code")) logging.info("acestream engine version: {}".format( self.engine_version)) logging.info("acestream engine version code: {}".format( self.engine_version_code)) return True def start_streaming(self, content_id: str) -> Tuple[str, str]: """Start streaming content ID Args: content_id: acestream content ID Returns: playback_url: playback url for media player to stream stat_url: stat url for client to get stat info of acestream engine """ if self.multi_players: # generate a player id to support multi players playing player_id = hashlib.sha1(content_id.encode()).hexdigest() url = 'http://{}:{}/ace/getstream?format=json&id={}&pid={}'.format( self.server_host, self.server_port, content_id, player_id) else: url = 'http://{}:{}/ace/getstream?format=json&id={}'.format( self.server_host, self.server_port, content_id) try: response_dic = self._api_request(url) except: logging.exception( "Parsing server http response failed while starting streaming!" 
) return "", "" else: playback_url = response_dic.get('response').get("playback_url") stat_url = response_dic.get('response').get("stat_url") return playback_url, stat_url def _start_media_player(self, media_player: str, playback_url: str) -> bool: """Start media to get stream from acestream server Args: media_player: media player cli program name playback_url: acestream server playback url Return: bool: whether media player stared successfully """ # change this if the predefined command is not suitable for your player cmd = [media_player, playback_url] try: process = subprocess.run(cmd) process.check_returncode() except subprocess.CalledProcessError: logging.exception("{} didn't exit normally!".format(media_player)) return False return True def _monitor_stream_status(self, stat_url: str) -> None: """Keep monitor stream stat status Args: stat_url: acestream server stat url """ def stream_stats_message(response: dict) -> str: return 'Status: {} | Peers: {:>3} | Down: {:>4}KB/s | Up: {:>4}KB/s'.format( response.get('response', { 'status': 0 }).get('status', ""), response.get('response', { 'peers': 0 }).get('peers', 0), response.get('response', { 'speed_down': 0 }).get('speed_down', 0), response.get('response', { 'speed_up': 0 }).get('speed_up', 0)) while (self.running): print(stream_stats_message(self._api_request(stat_url))) time.sleep(self.poll_time) def run(self, content_id: str, media_player: str) -> bool: """A simplified api for running whole process easily Args: content_id: acestream content ID media_player: media player to play the stream Returns: bool: whether client run successfully """ if not self._check_server_availability(): logging.error( "Server is not available. Please check server status") return False logging.info("Acestream server is available") playback_url, stat_url = self.start_streaming(content_id) if not playback_url or not stat_url: return False logging.debug("Server playback url: {}".format(playback_url)) logging.debug("Server stat url: {}".format(stat_url)) if not self._start_media_player(media_player, playback_url): return False self._monitor_stream_status(stat_url) DEFAULT_SERVER_HOSTNAME = '127.0.0.1' DEFAULT_SERVER_PORT = 6878 DEFAULT_MEDIA_PLAYER = "iina" SERVER_POLL_TIME = 2 SERVER_STATUS_STREAM_ACTIVE = 'dl' FORMAT = '%(levelname)s %(asctime)-15s %(filename)s %(lineno)-8s %(message)s' def parse_args() -> argparse.Namespace: """Parse comand line arguments Returns: argparse.Namespace: command line args """ # create parser parser = argparse.ArgumentParser( description='Instructs server to commence a given content ID. 
' 'Will execute a local media player once playback has started.') parser.add_argument( '--content-id', help='content ID to stream', metavar='HASH', required=True, ) parser.add_argument( '--player', help='media player to execute once stream active', default=DEFAULT_MEDIA_PLAYER, ) parser.add_argument( '--server', default=DEFAULT_SERVER_HOSTNAME, help='server hostname, defaults to %(default)s', metavar='HOSTNAME', ) parser.add_argument( '--port', default=DEFAULT_SERVER_PORT, help='server HTTP API port, defaults to %(default)s', ) parser.add_argument( '--multi-players', action="store_true", help='play stream in multiple players mode, defaults to %(default)s', ) parser.add_argument( '-d', '--debug', action="store_true", help='run client in debug mode', ) args = parser.parse_args() if not re.match(r'^[a-f0-9]{40}$', args.content_id): # if content id is not a valid hash, quit program logging.error('Invalid content ID: [{}]'.format(args.content_id)) sys.exit(1) return args if __name__ == "__main__": args = parse_args() if args.debug: logging.basicConfig(format=FORMAT, level=logging.DEBUG) else: logging.basicConfig(format=FORMAT, level=logging.INFO) with Client(args.server, args.port) as client: client.run(args.content_id, args.player)
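# Example invocation (the content ID below is an illustrative 40-character
# hex hash, not a real stream):
#
#   python3 playstream3.py \
#       --content-id 94c2fd8fb9bc8f2fc71a2cbe9d4b866f227a0209 \
#       --player mpv --server 127.0.0.1 --port 6878 --debug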
# -*- coding: utf-8 -*-
import requests


class BoardSettings:
    def __init__(self, board):
        retry_attempts = 10
        for attempt in range(retry_attempts):
            settings_url = "https://8ch.net/settings.php?board={0}".format(board)
            try:
                r = requests.get(settings_url)
                j = r.json()
            except requests.exceptions.Timeout:
                print("Could not connect to {0}, retrying (retry {1}/{2})".format(settings_url, attempt, retry_attempts))
                continue
            except requests.exceptions.RequestException as e:
                print(e)
                exit()
            except ValueError as e:
                print("Failed to get json response from {0}\nError: {1}, retrying (retry {2}/{3})".format(settings_url, e, attempt, retry_attempts))
                continue
            break
        else:
            # every attempt timed out or returned invalid JSON
            raise RuntimeError("Unable to fetch settings for board {0} after {1} attempts".format(board, retry_attempts))

        self.allowed_extensions = [".{0}".format(ext) for ext in j["allowed_ext"] + j["allowed_ext_files"]]
        self.max_files = j["max_images"]
        self.captcha_enabled = j["captcha"]["enabled"]
        self.new_thread_captcha = j["new_thread_capt"]
import unittest from keydra.providers import cloudflare from keydra.exceptions import DistributionException from keydra.exceptions import RotationException from unittest.mock import call from unittest.mock import MagicMock from unittest.mock import patch CF_CREDS = {'manage_tokens.secret': 'pass'} CF_SECRET_ALL = { 'key': 'all', 'provider': 'cloudflare', } CF_SECRET_ALL = { 'key': 'all', 'provider': 'cloudflare', } class TestProviderCloudflare(unittest.TestCase): def test_distribute(self): cli = cloudflare.Client(credentials=CF_CREDS) with self.assertRaises(DistributionException): cli.distribute('bla', 'somewhere') @patch.object(cloudflare.Client, '_rotate') def test_rotate(self, mk_r): cli = cloudflare.Client(credentials=CF_CREDS) cli.rotate({'key': 'all'}) mk_r.assert_called_once_with('all') def test__rotate_bad_verify(self): cli = cloudflare.Client(credentials=CF_CREDS) cli._client = MagicMock() cli._client.verify.return_value = {'success': False} with self.assertRaises(RotationException): cli._rotate('all') cli._client.verify.side_effect = Exception('Boom') with self.assertRaises(RotationException): cli._rotate('all') def test__rotate_bad_list_tokens(self): cli = cloudflare.Client(credentials=CF_CREDS) cli._client = MagicMock() cli._client.verify.return_value = {'success': True} cli._client.list_tokens.return_value = {'result': []} with self.assertRaises(RotationException): cli._rotate('all') cli._client.list_tokens.side_effect = Exception('Boom') with self.assertRaises(RotationException): cli._rotate('all') def test__rotate_bad_roll_token(self): cli = cloudflare.Client(credentials=CF_CREDS) cli._client = MagicMock() cli._client.verify.return_value = {'success': True} cli._client.list_tokens.return_value = { 'result': [{'id': 'id', 'name': 'name'}] } cli._client.roll_token.return_value = {'success': False} with self.assertRaises(RotationException): cli._rotate('all') cli._client.roll_token.side_effect = Exception('Boom') with self.assertRaises(RotationException): cli._rotate('all') def test__rotate(self): cli = cloudflare.Client(credentials=CF_CREDS) cli._client = MagicMock() cli._client.verify.return_value = {'success': True} cli._client.roll_token.return_value = { 'result': 'some_secret', 'success': True } cli._client.list_tokens.return_value = { 'result': [ { 'id': 'd475f3be504dd5ba4b290018180fe64c', 'name': 'manage_dns', 'status': 'active', 'issued_on': '2020-05-12T01:07:02Z', 'modified_on': '2020-05-12T01:07:02Z', 'last_used_on': None, 'policies': [] }, { 'id': '0e64546b0b4d554208f7bc1435c7dcc9', 'name': 'manage_tokens', 'status': 'active', 'issued_on': '2020-05-12T00:47:52Z', 'modified_on': '2020-05-12T01:06:08Z', 'last_used_on': '2020-05-12T03:19:22Z', 'policies': [] } ], 'result_info': { 'page': 1, 'per_page': 20, 'total_pages': 1, 'count': 2, 'total_count': 2 }, 'success': True, 'errors': [], 'messages': [] } resp = cli._rotate('manage_tokens') self.assertEqual( resp, { 'provider': 'cloudflare', 'manage_tokens.key': '0e64546b0b4d554208f7bc1435c7dcc9', 'manage_tokens.secret': 'some_secret', } ) cli._client.roll_token.assert_called_once_with( '0e64546b0b4d554208f7bc1435c7dcc9' ) cli._client.roll_token.reset_mock() resp = cli._rotate('all') self.assertEqual( resp, { 'provider': 'cloudflare', 'manage_tokens.key': '0e64546b0b4d554208f7bc1435c7dcc9', 'manage_tokens.secret': 'some_secret', 'manage_dns.key': 'd475f3be504dd5ba4b290018180fe64c', 'manage_dns.secret': 'some_secret', } ) cli._client.roll_token.assert_has_calls( [ call('d475f3be504dd5ba4b290018180fe64c'), 
call('0e64546b0b4d554208f7bc1435c7dcc9'), ] ) cli._client.roll_token.reset_mock() resp = cli._rotate(None) self.assertEqual( resp, { 'provider': 'cloudflare', 'manage_tokens.key': '0e64546b0b4d554208f7bc1435c7dcc9', 'manage_tokens.secret': 'some_secret', 'manage_dns.key': 'd475f3be504dd5ba4b290018180fe64c', 'manage_dns.secret': 'some_secret', } ) cli._client.roll_token.assert_has_calls( [ call('d475f3be504dd5ba4b290018180fe64c'), call('0e64546b0b4d554208f7bc1435c7dcc9'), ] ) def test_redact_result(self): result = { 'status': 'success', 'action': 'rotate_secret', 'value': { 'manage_tokens.secret': 'SECRET_ONE', 'provider': 'cloudflare', 'manage_dns.key': 'd475f3be504dd5ba4b290018180fe64c', 'manage_dns.secret': 'SECRET_TWO', 'manage_tokens.key': '0e64546b0b4d554208f7bc1435c7dcc9' } } r_result = cloudflare.Client.redact_result(result) r_value = r_result['value'] self.assertNotEqual(r_value['manage_tokens.secret'], 'SECRET_ONE') self.assertNotEqual(r_value['manage_dns.secret'], 'SECRET_TWO')
from functools import partial from homely._ui import allowinteractive, note, shellquote, warn from homely._utils import haveexecutable, run __all__ = ["haveexecutable", "execute"] def execute(cmd, stdout=None, stderr=None, expectexit=0, **kwargs): # Executes `cmd` in a subprocess. Raises a SystemError if the exit code # is different to `expecterror`. # # The stdout and stderr arguments for the most part work just like # homely._ui.run(), with the main difference being that when stdout=None or # stderr=None, these two streams will be filtered through the homely's # logging functions instead of being sent directly to the python process's # stdout/stderr. Also, the stderr argument will default to "STDOUT" so that # the timing of the two streams is recorded more accurately. # # If the process absolutely _must_ talk to a TTY, you can use stdout="TTY", # and a SystemError will be raised if homely is being run in # non-interactive mode. When using stdout="TTY", you should omit the stderr # argument. def outputhandler(data, isend, prefix): # FIXME: if we only get part of a stream, then we have a potential bug # where we only get part of a multi-byte utf-8 character. while len(data): pos = data.find(b"\n") if pos < 0: break # write out the line note(data[0:pos].decode('utf-8'), dash=prefix) data = data[pos+1:] if isend: if len(data): note(data.decode('utf-8'), dash=prefix) else: # return any remaining data so it can be included at the start of # the next run return data if stdout == "TTY": if not allowinteractive(): raise SystemError("cmd wants interactive mode") assert stderr is None stdout = None else: if stdout is None: prefix = "1> " if stderr is False else "&> " stdout = partial(outputhandler, prefix=prefix) if stderr is None: if stdout in (False, True): stderr = partial(outputhandler, prefix="2> ") else: stderr = "STDOUT" outredir = ' 1> /dev/null' if stdout is False else '' if stderr is None: errredir = ' 2>&1' else: errredir = ' 2> /dev/null' if stderr is False else '' with note('{}$ {}{}{}'.format(kwargs.get('cwd', ''), ' '.join(map(shellquote, cmd)), outredir, errredir)): returncode, out, err = run(cmd, stdout=stdout, stderr=stderr, **kwargs) if type(expectexit) is int: exitok = returncode == expectexit else: exitok = returncode in expectexit if exitok: return returncode, out, err # still need to dump the stdout/stderr if they were captured if out is not None: outputhandler(out, True, '1> ') if err is not None: outputhandler(err, True, '1> ') message = "Unexpected exit code {}. Expected {}".format( returncode, expectexit) warn(message) raise SystemError(message)
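# Usage sketch (illustrative commands; extra keyword arguments such as cwd are
# passed straight through to homely._utils.run(), and stdout=True is assumed
# to mean "capture", as the error path above implies):

if __name__ == '__main__':
    # stream output through homely's logging, fail unless the exit code is 0
    execute(['git', 'status'], cwd='/tmp')

    # capture stdout instead of logging it, and accept an exit code of 0 or 1
    returncode, out, err = execute(['grep', 'pattern', 'somefile'],
                                   stdout=True, expectexit=(0, 1))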
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities __all__ = ['HealthbotArgs', 'Healthbot'] @pulumi.input_type class HealthbotArgs: def __init__(__self__, *, resource_group_name: pulumi.Input[str], sku_name: pulumi.Input[str], location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ The set of arguments for constructing a Healthbot resource. :param pulumi.Input[str] resource_group_name: Specifies The name of the Resource Group in which to create the Healtbot Service. CHaning this forces a new resource to be created. :param pulumi.Input[str] sku_name: The name which should be used for the sku of the service. Possible values are "F0" and "S1". :param pulumi.Input[str] location: Specifies The Azure Region where the resource exists. CHanging this force a new resource to be created. :param pulumi.Input[str] name: Specifies The name of the Healthbot Service resource. Changing this forces a new resource to be created. :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the service. """ pulumi.set(__self__, "resource_group_name", resource_group_name) pulumi.set(__self__, "sku_name", sku_name) if location is not None: pulumi.set(__self__, "location", location) if name is not None: pulumi.set(__self__, "name", name) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ Specifies The name of the Resource Group in which to create the Healtbot Service. CHaning this forces a new resource to be created. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="skuName") def sku_name(self) -> pulumi.Input[str]: """ The name which should be used for the sku of the service. Possible values are "F0" and "S1". """ return pulumi.get(self, "sku_name") @sku_name.setter def sku_name(self, value: pulumi.Input[str]): pulumi.set(self, "sku_name", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Specifies The Azure Region where the resource exists. CHanging this force a new resource to be created. """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Specifies The name of the Healthbot Service resource. Changing this forces a new resource to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A mapping of tags which should be assigned to the service. 
""" return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) @pulumi.input_type class _HealthbotState: def __init__(__self__, *, bot_management_portal_url: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None): """ Input properties used for looking up and filtering Healthbot resources. :param pulumi.Input[str] bot_management_portal_url: The management portal url. :param pulumi.Input[str] location: Specifies The Azure Region where the resource exists. CHanging this force a new resource to be created. :param pulumi.Input[str] name: Specifies The name of the Healthbot Service resource. Changing this forces a new resource to be created. :param pulumi.Input[str] resource_group_name: Specifies The name of the Resource Group in which to create the Healtbot Service. CHaning this forces a new resource to be created. :param pulumi.Input[str] sku_name: The name which should be used for the sku of the service. Possible values are "F0" and "S1". :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the service. """ if bot_management_portal_url is not None: pulumi.set(__self__, "bot_management_portal_url", bot_management_portal_url) if location is not None: pulumi.set(__self__, "location", location) if name is not None: pulumi.set(__self__, "name", name) if resource_group_name is not None: pulumi.set(__self__, "resource_group_name", resource_group_name) if sku_name is not None: pulumi.set(__self__, "sku_name", sku_name) if tags is not None: pulumi.set(__self__, "tags", tags) @property @pulumi.getter(name="botManagementPortalUrl") def bot_management_portal_url(self) -> Optional[pulumi.Input[str]]: """ The management portal url. """ return pulumi.get(self, "bot_management_portal_url") @bot_management_portal_url.setter def bot_management_portal_url(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "bot_management_portal_url", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Specifies The Azure Region where the resource exists. CHanging this force a new resource to be created. """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter def name(self) -> Optional[pulumi.Input[str]]: """ Specifies The name of the Healthbot Service resource. Changing this forces a new resource to be created. """ return pulumi.get(self, "name") @name.setter def name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "name", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> Optional[pulumi.Input[str]]: """ Specifies The name of the Resource Group in which to create the Healtbot Service. CHaning this forces a new resource to be created. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="skuName") def sku_name(self) -> Optional[pulumi.Input[str]]: """ The name which should be used for the sku of the service. Possible values are "F0" and "S1". 
""" return pulumi.get(self, "sku_name") @sku_name.setter def sku_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "sku_name", value) @property @pulumi.getter def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]: """ A mapping of tags which should be assigned to the service. """ return pulumi.get(self, "tags") @tags.setter def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]): pulumi.set(self, "tags", value) class Healthbot(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): """ Manages a Healthbot Service. ## Import Healthbot Service can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:bot/healthbot:Healthbot example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.HealthBot/healthBots/bot1 ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] location: Specifies The Azure Region where the resource exists. CHanging this force a new resource to be created. :param pulumi.Input[str] name: Specifies The name of the Healthbot Service resource. Changing this forces a new resource to be created. :param pulumi.Input[str] resource_group_name: Specifies The name of the Resource Group in which to create the Healtbot Service. CHaning this forces a new resource to be created. :param pulumi.Input[str] sku_name: The name which should be used for the sku of the service. Possible values are "F0" and "S1". :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the service. """ ... @overload def __init__(__self__, resource_name: str, args: HealthbotArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Manages a Healthbot Service. ## Import Healthbot Service can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:bot/healthbot:Healthbot example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.HealthBot/healthBots/bot1 ``` :param str resource_name: The name of the resource. :param HealthbotArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(HealthbotArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None, __props__=None): if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = HealthbotArgs.__new__(HealthbotArgs) __props__.__dict__["location"] = location __props__.__dict__["name"] = name if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name if sku_name is None and not opts.urn: raise TypeError("Missing required property 'sku_name'") __props__.__dict__["sku_name"] = sku_name __props__.__dict__["tags"] = tags __props__.__dict__["bot_management_portal_url"] = None super(Healthbot, __self__).__init__( 'azure:bot/healthbot:Healthbot', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, bot_management_portal_url: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, sku_name: Optional[pulumi.Input[str]] = None, tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Healthbot': """ Get an existing Healthbot resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] bot_management_portal_url: The management portal url. :param pulumi.Input[str] location: Specifies The Azure Region where the resource exists. CHanging this force a new resource to be created. :param pulumi.Input[str] name: Specifies The name of the Healthbot Service resource. Changing this forces a new resource to be created. :param pulumi.Input[str] resource_group_name: Specifies The name of the Resource Group in which to create the Healtbot Service. CHaning this forces a new resource to be created. :param pulumi.Input[str] sku_name: The name which should be used for the sku of the service. Possible values are "F0" and "S1". :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the service. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _HealthbotState.__new__(_HealthbotState) __props__.__dict__["bot_management_portal_url"] = bot_management_portal_url __props__.__dict__["location"] = location __props__.__dict__["name"] = name __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["sku_name"] = sku_name __props__.__dict__["tags"] = tags return Healthbot(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="botManagementPortalUrl") def bot_management_portal_url(self) -> pulumi.Output[str]: """ The management portal url. """ return pulumi.get(self, "bot_management_portal_url") @property @pulumi.getter def location(self) -> pulumi.Output[str]: """ Specifies The Azure Region where the resource exists. CHanging this force a new resource to be created. """ return pulumi.get(self, "location") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Specifies The name of the Healthbot Service resource. Changing this forces a new resource to be created. """ return pulumi.get(self, "name") @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Output[str]: """ Specifies The name of the Resource Group in which to create the Healtbot Service. CHaning this forces a new resource to be created. """ return pulumi.get(self, "resource_group_name") @property @pulumi.getter(name="skuName") def sku_name(self) -> pulumi.Output[str]: """ The name which should be used for the sku of the service. Possible values are "F0" and "S1". """ return pulumi.get(self, "sku_name") @property @pulumi.getter def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]: """ A mapping of tags which should be assigned to the service. """ return pulumi.get(self, "tags")
# -*- coding: utf-8 -*-
__author__ = "姜维洋"
import re
import time

import scrapy
from scrapy import Request, FormRequest
from WEIBOSEARCH.items import WeibosearchItem
from WEIBOSEARCH.settings import TOPIC, max_page


class WeibosearchSpider(scrapy.Spider):
    name = 'WeiboSearch'
    search_url = 'https://weibo.cn/search/mblog?hideSearchFrame=&keyword={topic}&advancedfilter=1&endtime={time}&sort=hot'
    # allowed_domains = ['weibo.cn']
    # start_urls = ['http://weibo.cn/']
    keyword_search = 100

    def parse_time(self, datetime):
        # Normalize Weibo's relative timestamps ("today HH:MM", "N minutes ago",
        # "month/day") into absolute dates.
        if re.match("今天.*", datetime):
            datetime = time.strftime("%Y-%m-%d", time.localtime()) + re.match("今天(.*)", datetime).group(1)
        if re.match("\d+分钟前", datetime):
            minute = re.match("(\d+)分钟前", datetime).group(1)
            datetime = time.strftime("%Y-%m-%d %H:%M", time.localtime(time.time() - float(minute) * 60))
        if re.match("\d+月\d+日", datetime):
            month = re.match("(\d+)月(\d+)日", datetime).group(1)
            day = re.match("(\d+)月(\d+)日", datetime).group(2)
            datetime = time.strftime("%Y", time.localtime()) + "-" + month + "-" + day
        return datetime

    def start_requests(self):
        print("Crawling data for the configured search topics")
        for topic in TOPIC:
            for page in range(max_page):
                form_data = {
                    "mp": str(self.keyword_search),
                    "page": str(page)
                }
                yield FormRequest(url=self.search_url.format(topic=topic, time=time.strftime("%Y%m%d", time.localtime())),
                                  callback=self.parse_search, formdata=form_data)
                break  # test code: only request the first page of each topic

    def parse_search(self, response):
        content_urls = response.xpath("//div[@class='c']//a[@class='cc']")
        for content_url in content_urls:
            content_url = content_url.xpath(".//@href").get().strip()
            yield Request(url=content_url, callback=self.parse_search_comment)

    def parse_search_comment(self, response):
        url = response.url
        url_id = re.search(r".*/(.+)\?.+?", url).group(1)
        share_count = response.xpath("//a[contains(.,'转发[')]/text()").re_first("转发\[(.*?)\]")
        comment_count = response.xpath("//span[contains(.,'评论[')]/text()").re_first("评论\[(.*?)\]")
        like_count = response.xpath("//a[contains(.,'赞[')]/text()").re_first("赞\[(.*?)\]")
        author = response.xpath("//div[@id='M_']//a[1]/text()").get().strip()
        pub_time = response.xpath("//div[@id='M_']//span[@class='ct']/text()").get().strip()
        pub_time = self.parse_time(pub_time)
        content_comments = response.xpath("//div[@id='M_']//span[@class='ctt']/text()").getall()
        content_comment = "".join(content_comments)
        item = WeibosearchItem(url_id=url_id, share_count=share_count, comment_count=comment_count,
                               like_count=like_count, author=author, pub_time=pub_time,
                               content_comment=content_comment, url=url)
        return item
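# --- Standalone sketch of the timestamp normalization done by parse_time above ---
# Weibo reports publish times as "今天HH:MM" (today), "N分钟前" (N minutes ago) or
# "M月D日" (month/day); the spider converts these to absolute dates. This sketch
# reimplements the same three branches so it can run without the Scrapy project;
# the sample inputs are made up.
import re
import time


def normalize_weibo_time(raw):
    if re.match("今天.*", raw):
        # "today HH:MM" -> today's date plus the given time of day
        return time.strftime("%Y-%m-%d", time.localtime()) + re.match("今天(.*)", raw).group(1)
    m = re.match(r"(\d+)分钟前", raw)
    if m:
        # "N minutes ago" -> absolute "YYYY-MM-DD HH:MM" N minutes in the past
        return time.strftime("%Y-%m-%d %H:%M", time.localtime(time.time() - float(m.group(1)) * 60))
    m = re.match(r"(\d+)月(\d+)日", raw)
    if m:
        # "M month D day" -> current year with the given month and day
        return time.strftime("%Y", time.localtime()) + "-" + m.group(1) + "-" + m.group(2)
    return raw


if __name__ == "__main__":
    for sample in ["今天 12:30", "5分钟前", "3月8日"]:
        print(sample, "->", normalize_weibo_time(sample))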
class PnWKitException(Exception): ... class GraphQLError(PnWKitException): ...
# -*- coding: utf-8 -*- """ CNN 'sparrow' from Grill/Schlüter, EUSIPCO 2017. Provides create(). Can be used as a local predictor in audioclass/__init__.py. Author: Jan Schlüter """ import torch.nn as nn import torch.nn.functional as F from . import ReceptiveField class Sparrow(nn.Module): """ CNN from Grill/Schlüter, EUSIPCO 2017. """ def __init__(self, num_channels, num_bands, num_outputs=1, output_bias=False, global_pool=False): super(Sparrow, self).__init__() self.global_pool = global_pool lrelu = nn.LeakyReLU(0.01) self.conv_stage = nn.ModuleList([ nn.Conv2d(num_channels, 32, 3, bias=False), nn.BatchNorm2d(32), lrelu, nn.Conv2d(32, 32, 3, bias=False), nn.BatchNorm2d(32), nn.MaxPool2d(3), lrelu, nn.Conv2d(32, 32, 3, bias=False), nn.BatchNorm2d(32), lrelu, nn.Conv2d(32, 32, 3, bias=False), nn.BatchNorm2d(32), lrelu, nn.Conv2d(32, 64, ((num_bands-4)//3-6, 3), bias=False), nn.BatchNorm2d(64), nn.MaxPool2d(3), lrelu, nn.Dropout(), nn.Conv2d(64, 256, (1, 9), bias=False), nn.BatchNorm2d(256), lrelu, nn.Dropout(), nn.Conv2d(256, 64, 1, bias=False), nn.BatchNorm2d(64), lrelu, nn.Dropout(), nn.Conv2d(64, num_outputs, 1, bias=output_bias), ]) self.receptive_field = ReceptiveField(103, 9, 0) def forward(self, x): for layer in self.conv_stage: x = layer(x) if x.shape[1] == 1: x = x.flatten(1) else: x = x.flatten(2) if self.global_pool: x, _ = x.max(dim=-1) return x def create(cfg, shapes, dtypes, num_classes): """ Instantiates a Model for the given data shapes and dtypes. """ num_channels = shapes['input'][0] num_bands = cfg['filterbank.num_bands'] num_outputs = 1 if num_classes == 2 else num_classes output_bias = cfg.get('model.output_bias', 0) return Sparrow(num_channels, num_bands, num_outputs, output_bias, global_pool=False)
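# --- Shape-check sketch for the Sparrow network above ---
# Builds the model for a hypothetical single-channel, 80-band spectrogram and runs
# one dummy batch whose time axis equals the receptive field of 103 frames. The
# sizes are illustrative, not taken from any config, and `Sparrow` is the class
# defined in the module above (its package-level ReceptiveField helper is needed).
import torch

num_channels, num_bands, num_frames = 1, 80, 103
model = Sparrow(num_channels, num_bands, num_outputs=1, global_pool=True)
model.eval()  # fixed BatchNorm statistics and no Dropout for the check

with torch.no_grad():
    dummy = torch.randn(2, num_channels, num_bands, num_frames)  # batch of 2 excerpts
    out = model(dummy)

# With num_outputs=1 the forward pass flattens to (batch, time) and global_pool
# then takes the max over time, so `out` has shape (2,).
print(out.shape)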
#!/usr/bin/env python # -*- coding: utf-8 -*- import warnings from ._tsrex import index, indices, tensor, TsrEx from ._tplex import template from ._constants import pi from ._special import zeros, ones, eye try: import google.colab # noqa: F401 try: from IPython import get_ipython from IPython.display import display, HTML get_ipython().events.register( 'pre_run_cell', lambda: display(HTML( "<script type='text/javascript' async " "src='https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.7/" "MathJax.js?config=TeX-MML-AM_CHTML'></script>" )) ) except Exception: warnings.warn('Cannot set up MathJAX, LaTeX rendering may not work.') except ImportError: pass __all__ = [ 'index', 'indices', 'tensor', 'template', 'zeros', 'ones', 'eye', 'pi', 'TsrEx' ]
# pylint: disable=unused-wildcard-import import os import jstyleson import sys from .cli.colorized_print import * def read_json_file(file_path): with open(file_path, 'r') as file: file_content = file.read() return jstyleson.loads(file_content) def write_json_file(file_path, config): with open(file_path, 'w') as file: file.write(config) def get_config_pathes(): script_dir = os.path.dirname(os.path.abspath(__file__)) config_base_path = os.path.join(script_dir, 'config.base.jsonc') # because of __main__ hack if not os.path.exists(config_base_path): script_dir = os.path.dirname(script_dir) config_base_path = os.path.join(script_dir, 'config.base.jsonc') app_config_dir = os.path.expanduser('~/.config/prman') if not os.path.exists(app_config_dir): os.mkdir(app_config_dir) config_path = os.path.join(app_config_dir, 'config.json') return (config_base_path, config_path) def add_config_kvp(key, value): config_base_path, config_path = get_config_pathes() config_base = read_json_file(config_base_path) if not key in config_base.keys() and key != 'gitlab.token': print_red(f'Unknown config key \'{key}\'.') sys.exit(1) config = read_json_file(config_path) if os.path.exists(config_path) else { } config[key] = value config_json = jstyleson.dumps(config, indent=2) write_json_file(config_path, config_json) def read_config(): config_base_path, config_path = get_config_pathes() config_base = read_json_file(config_base_path) config = read_json_file(config_path) if os.path.exists(config_path) else { } for key in config_base.keys(): config[key] = config.get(key, config_base[key]) if config.get('gitlab.token', None) is None: print_red('Config gitlab.token is not specified. You can create one here https://gitlab.com/profile/personal_access_tokens') sys.exit(1) return config
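# --- Hedged usage sketch for the config helpers above ---
# Assumes this module is importable; the `prman.config` path below is a guess based
# on the `~/.config/prman` directory used above, not something stated in the file,
# and the token value is a placeholder.
from prman.config import add_config_kvp, read_config

# Persist a GitLab personal access token into ~/.config/prman/config.json ...
add_config_kvp("gitlab.token", "<your-gitlab-token>")

# ...then read the user config merged over the config.base.jsonc defaults.
config = read_config()
print(config["gitlab.token"])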
import random def run (autoTester): def f(): return 3, 4 autoTester.check ((1, 2, *f())) def g(): return [3, 4] autoTester.check ([1, 2, *f()]) # doesn't compile # d1 = {'a':1, 'b':2} # d2 = {'c':3, **d1} # autoTester.check(str(d2))
import torch import glob import unicodedata import string import random all_letters = string.ascii_letters + " .,;'-" n_letters = len(all_letters) def find_files(path): return glob.glob(path) # Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427 def unicode2ascii(s): return ''.join( c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn' and c in all_letters ) # Read a file and split into lines def read_lines(filename): lines = open(filename).read().strip().split('\n') return [unicode2ascii(line) for line in lines] # Build the category_lines dictionary, a list of lines per category category_lines = {} all_categories = [] for filename in find_files('../data/names/*.txt'): category = filename.split('/')[-1].split('.')[0] all_categories.append(category) lines = read_lines(filename) category_lines[category] = lines n_categories = len(all_categories) # Find letter index from all_letters, e.g. "a" = 0 def letter2index(letter): return all_letters.find(letter) # Turn a line into a <line_length x 1 x n_letters>, # or an array of one-hot letter vectors def line2tensor(line): tensor = torch.zeros(len(line), 1, n_letters) for li, letter in enumerate(line): tensor[li][0][letter2index(letter)] = 1 return tensor def category_from_output(output): top_n, top_i = output.data.topk(1) # Tensor out of Variable with .data category_i = top_i[0][0] return all_categories[category_i], category_i def random_choice(l): return l[random.randint(0, len(l) - 1)] def random_training_pair(): category = random_choice(all_categories) line = random_choice(category_lines[category]) category_tensor = torch.LongTensor([all_categories.index(category)]) line_tensor = line2tensor(line) return category, line, category_tensor, line_tensor
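# --- Standalone illustration of the one-hot line encoding used by line2tensor above ---
# Kept self-contained so it does not require the ../data/names/*.txt files that the
# module loads at import time; the name "Jones" is just an example input.
import string

import torch

all_letters = string.ascii_letters + " .,;'-"


def one_hot_line(line):
    # <line_length x 1 x n_letters> tensor: one one-hot row per character
    tensor = torch.zeros(len(line), 1, len(all_letters))
    for i, letter in enumerate(line):
        tensor[i][0][all_letters.find(letter)] = 1
    return tensor


encoded = one_hot_line("Jones")
print(encoded.size())       # torch.Size([5, 1, 58])
print(encoded[0][0].sum())  # tensor(1.) -- exactly one position set per letter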
from rules_reader import read_rules, empty_char
from sys import argv, stderr
import json


def parse_rule_label(
    inpc: dict,
    inpdata: str,
    rules,
    label: str,
    recsel={},
    recpath="",
    simple_terminal=False,
    loglevel=0,
    emptyExpansion=True,
    recLog=None,
):
    """Tries to recursively expand the label (rule) according to the input
    (inpdata) and its cursor (inpc).

    Receives the rule set (rules) and the name of the rule to interpret (label).
    If simple_terminal=True, the leaves of the tree are represented in a
    simplified form.

    It also optionally receives the full recursion path as a string and a
    dictionary containing the ambiguous endings marked by that recursion path.
    parse_permutations uses these last two parameters to permute the ambiguous
    combinations until it finds an ending that yields a viable result.

    loglevel increases the amount of logging written to stderr.
    emptyExpansion re-applies the empty-ending cases over the recursion stack
    (experimental; it may make debugging harder).

    Returns a list of productions if viable; an empty list if the rule has an
    empty ending; or None if this rule cannot be applied to the input.
    """
    exprules = rules[1][rules[0][label]]["expr_rules"]

    def is_empty_rule(rs):
        return len(rs) == 1 and rs[0] == empty_char

    if recLog is None:
        recLog = {}
    if 'recpath' not in recLog:
        recLog['recpath'] = ''

    productions = []
    for rs in exprules:  # For each expansion alternative of the rule:
        curs = inpc.copy()  # Make a copy of where the cursor is
        applies = []  # Holds what will be returned - if anything is returned.
        for r in rs:  # For each word of the alternative
            # Check whether the cursor has already reached the end of the input.
            if curs["cursor"] >= len(inpdata):
                applies = None  # If so, abort the rule.
                break
            c = inpdata[curs["cursor"]]  # Token read
            rh = r[0]  # Sign (@#$%£)
            rt = r[1:]  # Word
            act = {
                "#": c["token"] == rt and c["class"] != "identificador",
                "$": c["class"] == "identificador",
                "%": c["class"] == rt
            }
            act_g = act.get(rh, None)
            if act_g is not None:
                # Simple sign/token: identifier/reserved word/additive/class/etc.
                if act_g:  # If the sign is satisfied, the token is accepted
                    t = (r + ':' + c['token']) if simple_terminal else {
                        'symbol': r,
                        'token': c['token']
                    }
                    applies.append(t)
                    curs["cursor"] += 1
                else:  # Otherwise, revert.
                    applies = None
                    break
            elif rh == "@":  # Invocation of another rule
                fullpath = '->'.join([recpath, label, str(exprules.index(rs)), r])
                reapp = parse_rule_label(curs, inpdata, rules, rt, recsel, fullpath,
                                         simple_terminal, loglevel, emptyExpansion, recLog)
                if reapp is None:
                    # If the rule cannot be derived, abort this expansion.
                    applies = None
                    break
                elif len(reapp) > 0:
                    # Otherwise, add the expansion.
                    applies.append({
                        'symbol': r,
                        'expansion': reapp
                    })
            elif rh != empty_char:
                # Invalid token: abort the expansion.
                applies = None
                break
        if applies is None:
            # If the expansion was aborted, try another alternative rule.
            continue
        elif len(applies) == 0 and emptyExpansion:
            # If the expansion is empty, leave it empty.
            productions.append((applies, curs, empty_char, [empty_char], label))
        else:
            # Otherwise, record that the label yields this expansion.
            productions.append((applies, curs, r, rs, label))

    prodind = 0  # Return the single expansion, if possible.
    if len(productions) > 1:
        # If there is more than one expansion, we have an ambiguity!
        s = recsel.get(recpath, None)
        if s is None:
            # If the ambiguity has not been detected yet, register it.
            if loglevel >= 2 and len(productions) == 2 and len([x for x in productions if x[2] == empty_char]) == 1:
                print('Sign of ambiguity: a possible empty path was not taken.', file=stderr)
            elif loglevel >= 1:
                print('Sign of ambiguity: review the rules and the output model.', file=stderr)
            if loglevel >= 3:
                for prod in productions:
                    print('  In {}=>{{ {} }}=>{}'.format(prod[4], ' '.join(prod[3]), prod[2]))
                print('')
            s = (0, len(productions), label, recpath, productions)
            # By default, we try to solve the model using the first match found.
            recsel[recpath] = s
            # If that solution turns out to be unviable, parse_permutations will change
            # the default value while trying to find some viable solution.
        prodind = s[0]  # Return one of the expansions
    elif len(productions) == 0:  # If the label yields no expansion at all.
        hasempty = len([x for x in exprules if is_empty_rule(x)]) > 0
        if hasempty:  # Check whether the empty rule exists in the set
            return []  # If so, return an empty production.
        # If something goes wrong, record it in a log.
        if not recLog['recpath'].startswith(recpath):
            # This log stores the last error of a leaf node of the tree, i.e. if a
            # non-leaf node hits this error, it checks whether the last recorded error
            # is not one of its descendants; if it is a descendant, it is ignored;
            # if not, this node is assumed to be a leaf and last error = current error.
            # This is done by looking at the full recursion path (if the last recorded
            # error was generated through this recursion, the error must not be
            # overwritten; otherwise, it is overwritten).
            recLog['recpath'] = recpath
            recLog['label'] = label
            recLog['inpc'] = inpc.copy()
        return None  # Otherwise, report that the production is unviable.
    inpc["cursor"] = productions[prodind][1]["cursor"]  # Reposition the cursor
    return productions[prodind][0]  # Return the expansion.


def parse_permutations(inpdata, rules, psel={}, pcounter=None, *args, **kwargs):
    '''Tries to interpret an input program (inpdata) using the provided rules (rules)
    through successive iterations over the ambiguous results.

    Returns as soon as it finds a viable result. If none is found, returns None.
    '''
    inpcursor = {
        "cursor": 0
    }
    if pcounter is None:
        pcounter = {
            'counter': 0,
            'logs': []
        }
    locked = psel.keys()  # Marks the ambiguities this call must not touch (the method is recursive)
    ps = psel.copy()  # Make a full copy of the ambiguity dictionary and pass it to the method
    # Each ambiguity in ps consists of a full address of the rule derivations.
    recLog = {}
    r = parse_rule_label(inpcursor, inpdata, rules, "program", ps, recLog=recLog, *args, **kwargs)
    pcounter['counter'] += 1
    if r is not None:  # If the call succeeded, return it!
        if pcounter['counter'] != 1:
            print('Program generated on the {}-th tree.'.format(pcounter['counter']), file=stderr)
        return r, pcounter['logs']
    for p in ps:  # Otherwise, look at all ambiguities and permute them.
        if p not in locked:  # If the current stack may change this ambiguity
            l = ps[p][1]  # Number of ambiguities for this address.
            o = ps[p][0]  # Which ambiguity was chosen.
            for i in range(o + 1, l):
                # For each possible ambiguity, iterate over the possible branches
                ps[p] = (i, l)  # Change the branch and call the method recursively.
                rec = parse_permutations(inpdata, rules, ps.copy(), pcounter, *args, **kwargs)
                if rec[0] is not None:  # On success, simply return
                    return rec
            ps[p] = (o, l)  # If there was no success, restore the initial state for the next iteration.
    pcounter['logs'].append(recLog)
    return None, pcounter['logs']


def parse_program(inp, rules, *args, **kwargs):
    '''Invokes parse_permutations, receiving the input (pre-processed by the
    lexical analyzer) and the rule set.
    '''
    def pdt2dict(d):
        dt = d.split("|")
        return {"token": dt[0], "a": dt[0], "class": dt[1], "line": dt[2]}
    r, logs = parse_permutations([pdt2dict(d) for d in inp.strip().split("\n")], rules, *args, **kwargs)
    if r is None:
        print('Could not generate a valid program.', file=stderr)
        print('There are {} invalid trees:'.format(len(logs)), file=stderr)
        print(json.dumps(logs), file=stderr)
        print('', file=stderr)
    return r


def print_help_msg(autoexit=True):
    print('Usage: python sintatico.py [-r -s] {input file} {output file}')
    print('Or: python sintatico.py [-r -s] {input file}')
    print('Or: python sintatico.py [-r -s] -i {output file}')
    print('Or: python sintatico.py [-r -s] -i')
    print('Or: python sintatico.py -h|--help')
    print('Where -r: Suppress exceptions on 1st-level left recursion.')
    print('      -s: Show terminal expansions as strings.')
    print('      -i: Read input from the stdin.')
    print('      -h or --help: Show this message.')
    if autoexit:
        exit()


if __name__ == "__main__":
    """By default, either receives the input file as a command-line argument
    or reads the input from stdin.
    """
    if '-h' in argv or '--help' in argv:
        print_help_msg()
    cargs = list(filter(lambda x: len(x) != 2 or x[0] != '-', argv[1:]))
    finp, fout = None, None
    if '-i' in argv:
        finp = ''
        try:
            while True:
                finp += input() + '\n'
        except EOFError:
            pass
    else:
        if len(cargs) == 0:
            print_help_msg()
        finp = cargs.pop(0)
        with open(finp, "r") as f:
            finp = f.read()
    if len(cargs) == 1:
        fout = cargs[0]
    rules = read_rules(raiseOnLeftRecursion=("-r" not in argv))
    p0 = parse_program(finp, rules, simple_terminal=('-s' in argv))
    p0json = json.dumps(p0)
    if fout is None:
        print(p0json)
    else:
        with open(fout, "w") as f:
            f.write(p0json)
from gpconfig_modules.database_segment_guc import DatabaseSegmentGuc
from gpconfig_modules.file_segment_guc import FileSegmentGuc
from gpconfig_modules.segment_guc import SegmentGuc


class MultiValueGuc(SegmentGuc):
    """
    Encapsulates the various GUC locations within a given segment.

    A segment can include 2 databases: the primary and a mirror. The database value
    is singular, since we strongly expect the values to be the same, given mirroring.
    However, the file values of primary and mirror can be different.
    So we model this MultiValueGuc object to accept 2 file values, and one database value.
    """

    def __init__(self, guc1, guc2):
        """
        Accept 2 gucs in any order. The gucs can be any combination of:

        * database guc
        * file guc
            - primary
            - mirror
        * existing comparison guc, with or without mirror
        """
        self.primary_file_seg_guc = None
        self.mirror_file_seg_guc = None
        self.db_seg_guc = None

        if not guc1 or not guc2:
            raise Exception("comparison requires two gucs")

        SegmentGuc.__init__(self, [guc1.context, guc1.name])

        if guc1.context != guc2.context:
            raise Exception("Not the same context")

        if isinstance(guc1, MultiValueGuc):
            # copy constructor
            self.db_seg_guc = guc1.db_seg_guc
            self.primary_file_seg_guc = guc1.primary_file_seg_guc
            self.mirror_file_seg_guc = guc1.mirror_file_seg_guc

        if isinstance(guc2, MultiValueGuc):
            # copy constructor
            self.db_seg_guc = guc2.db_seg_guc
            self.primary_file_seg_guc = guc2.primary_file_seg_guc
            self.mirror_file_seg_guc = guc2.mirror_file_seg_guc

        if isinstance(guc1, FileSegmentGuc):
            if self.primary_file_seg_guc:
                if self.primary_file_seg_guc.dbid == guc1.dbid:
                    self.primary_file_seg_guc = guc1
                else:
                    self.mirror_file_seg_guc = guc1
            else:
                self.primary_file_seg_guc = guc1

        if isinstance(guc2, FileSegmentGuc):
            if self.primary_file_seg_guc:
                if self.primary_file_seg_guc.dbid == guc2.dbid:
                    self.primary_file_seg_guc = guc2
                else:
                    self.mirror_file_seg_guc = guc2
            else:
                self.primary_file_seg_guc = guc2

        if isinstance(guc1, DatabaseSegmentGuc):
            self.db_seg_guc = guc1

        if isinstance(guc2, DatabaseSegmentGuc):
            self.db_seg_guc = guc2

    def report_success_format(self):
        file_val = self.primary_file_seg_guc.get_value()
        if self.db_seg_guc:
            result = "%s value: %s | file: %s" % (self.get_label(), self.db_seg_guc.value,
                                                  self._use_dash_when_none(file_val))
        else:
            result = "%s value: %s" % (self.get_label(), file_val)
        return result

    def report_fail_format(self):
        sort_seg_guc_objs = [obj for obj in [self.primary_file_seg_guc, self.mirror_file_seg_guc] if obj]
        sort_seg_guc_objs.sort(key=lambda x: x.dbid)
        if self.db_seg_guc:
            report = [self._report_fail_format_with_database_and_file_gucs(seg_guc_obj)
                      for seg_guc_obj in sort_seg_guc_objs]
        else:
            report = [seg_guc_obj.report_fail_format()[0] for seg_guc_obj in sort_seg_guc_objs]
        return report

    def _report_fail_format_with_database_and_file_gucs(self, segment_guc_obj):
        return "[context: %s] [dbid: %s] [name: %s] [value: %s | file: %s]" % (
            self.db_seg_guc.context,
            segment_guc_obj.dbid,
            self.db_seg_guc.name,
            self.db_seg_guc.value,
            self._use_dash_when_none(segment_guc_obj.value))

    def _use_dash_when_none(self, value):
        return value if value is not None else "-"

    def is_internally_consistent(self):
        if not self.db_seg_guc:
            return self.compare_primary_and_mirror_files()
        else:
            if self.primary_file_seg_guc is None:
                return True
            if self.primary_file_seg_guc.get_value() is None:
                return True
            result = True
            if self.mirror_file_seg_guc and self.db_seg_guc:
                result = self.mirror_file_seg_guc.value == self.db_seg_guc.value
                if not result:
                    return result
            return self.db_seg_guc.value == self.primary_file_seg_guc.value and result

    def get_value(self):
        file_value = ""
        if self.primary_file_seg_guc:
            file_value = str(self.primary_file_seg_guc.get_value())
        db_value = ""
        if self.db_seg_guc:
            db_value = str(self.db_seg_guc.get_value())
        return db_value + "||" + file_value

    def set_mirror_file_segment(self, mirror_file_seg):
        self.mirror_file_seg_guc = mirror_file_seg

    def get_primary_dbid(self):
        return self.primary_file_seg_guc.dbid

    def set_primary_file_segment(self, guc):
        self.primary_file_seg_guc = guc

    def compare_primary_and_mirror_files(self):
        if self.primary_file_seg_guc and self.mirror_file_seg_guc:
            return self.primary_file_seg_guc.get_value() == self.mirror_file_seg_guc.get_value()
        return True
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests the transport_process.py module.""" import time import unittest from unittest import mock from gazoo_device.switchboard import data_framer from gazoo_device.switchboard import switchboard_process from gazoo_device.switchboard import transport_process from gazoo_device.switchboard import transport_properties from gazoo_device.tests.unit_tests.utils import fake_transport from gazoo_device.tests.unit_tests.utils import unit_test_case from gazoo_device.utility import multiprocessing_utils _EXCEPTION_TIMEOUT = 3 _LOG_MESSAGE_TIMEOUT = 1 get_queue_size = unit_test_case.get_queue_size wait_for_queue_writes = switchboard_process.wait_for_queue_writes class TransportProcessTests(unit_test_case.MultiprocessingTestCase): def setUp(self): super().setUp() self.command_queue = multiprocessing_utils.get_context().Queue() self.log_queue = multiprocessing_utils.get_context().Queue() self.raw_data_queue = multiprocessing_utils.get_context().Queue() self.call_result_queue = multiprocessing_utils.get_context().Queue() def tearDown(self): if hasattr(self, "uut"): del self.uut # Release shared memory file descriptors used by queues. del self.command_queue del self.log_queue del self.raw_data_queue del self.call_result_queue super().tearDown() def test_000_transport_construct_destruct(self): """Test TransportProcess constructing and destructing raises no errors.""" transport = None self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) self.assertFalse(self.uut.is_started(), "Expected process not started, found started") self.assertFalse(self.uut.is_running(), "Expected process to not running, found running") def test_001_transport_enqueue_command_writes_below_split(self): """Test _enqueue_command_writes can split commands below max write limit.""" write_queue = multiprocessing_utils.get_context().Queue() command = "short command" transport_process._enqueue_command_writes(write_queue, command) wait_for_queue_writes(write_queue) self._verify_command_split(command, write_queue) def test_002_transport_enqueue_command_writes_above_split(self): """Test _enqueue_command_writes splits commands below max write limit.""" write_queue = multiprocessing_utils.get_context().Queue() command = ("this will be a really long command that exceeds the 32 byte " "limit") transport_process._enqueue_command_writes(write_queue, command) wait_for_queue_writes(write_queue) self._verify_command_split(command, write_queue) def test_100_transport_accepts_valid_transport_commands(self): """Test send_command accepts valid transport commands.""" transport = None self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) for command in transport_process._ALL_VALID_COMMANDS: self.uut.send_command(command) 
wait_for_queue_writes(self.command_queue) self.assertFalse(self.command_queue.empty(), "Expected command queue to not be empty") command_message = self.command_queue.get() self.assertEqual( command, command_message[0], "Expected command {} found {}".format(command, command_message[0])) def test_110_transport_toggle_raw_data_raises_error(self): """Test toggle_raw_data raises error.""" transport = None self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) with self.assertRaisesRegex(RuntimeError, r"No queue provided"): self.uut.toggle_raw_data() def test_111_transport_toggle_raw_data_toggles(self): """Test toggle_raw_data toggles raw_data_enable.""" transport = None self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue, raw_data_queue=self.raw_data_queue) self.assertFalse(self.uut.raw_data_enabled(), "Expected raw_data streaming to be disabled") self.uut.toggle_raw_data() self.assertTrue(self.uut.raw_data_enabled(), "Expected raw_data streaming to be enabled") self.uut.toggle_raw_data() self.assertFalse(self.uut.raw_data_enabled(), "Expected raw_data streaming to be disabled") def test_120_transport_get_raw_data_raises_error(self): """Test get_raw_data raises error.""" transport = None self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) with self.assertRaisesRegex(RuntimeError, r"No queue provided"): self.uut.get_raw_data() def test_200_transport_opens_and_closes_transport(self): """Test transport process calls transport open and close.""" transport = fake_transport.FakeTransport() self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) self.uut.start() self.uut.stop() self.assertEqual( 1, transport.open_count.value, "Expected transport.open to be called {} called {}".format( 1, transport.open_count.value)) self.assertEqual( 1, transport.close_count.value, "Expected transport.close to be called {} called {}".format( 1, transport.close_count.value)) def test_201_transport_closes_transport_on_command(self): """Test transport closes transport on command.""" transport = mock.MagicMock(spec=fake_transport.FakeTransport) self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) self.command_queue.put((transport_process.CMD_TRANSPORT_CLOSE, None)) wait_for_queue_writes(self.command_queue) self.uut._pre_run_hook() self.assertTrue(self.uut.is_open(), "Expected transport process to be opened, found closed") self.uut._do_work() transport.close.assert_called_once() transport.close.assert_called_once() self.uut._post_run_hook() transport.open.assert_called_once() self.assertEqual( 2, transport.close.call_count, "Expected transport.close to be called {} called {}".format( 2, transport.close.call_count)) def test_202_transport_opens_transport_on_command(self): """Test transport opens transport on command.""" transport = mock.MagicMock(spec=fake_transport.FakeTransport) self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, 
call_result_queue=self.call_result_queue) self.command_queue.put((transport_process.CMD_TRANSPORT_OPEN, None)) wait_for_queue_writes(self.command_queue) self.uut._pre_run_hook() self.assertTrue(self.uut.is_open(), "Expected transport process to be opened, found closed") self.uut._do_work() self.assertTrue(self.uut.is_open(), "Expected transport process to be opened, found closed") self.uut._post_run_hook() self.assertEqual( 2, transport.open.call_count, "Expected transport.open to be called {} called {}".format( 2, transport.open.call_count)) transport.close.assert_called_once() def test_203_transport_skips_opens_on_start(self): """Test transport process calls transport open and close.""" transport = fake_transport.FakeTransport() transport.set_property(transport_properties.OPEN_ON_START, False) self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) self.uut.start() self.uut.stop() self.assertEqual( 0, transport.open_count.value, "Expected transport.open to be called {} called {}".format( 0, transport.open_count.value)) self.assertEqual( 1, transport.close_count.value, "Expected transport.close to be called {} called {}".format( 1, transport.close_count.value)) def test_204_transport_auto_reopen_with_close(self): """Transport process shouldn't reopen after being closed via close().""" transport = mock.MagicMock(spec=fake_transport.FakeTransport) transport._properties = {} transport._properties[transport_properties.AUTO_REOPEN] = True transport._transport_open = mock.MagicMock( spec=multiprocessing_utils.get_context().Event()) transport.is_open.side_effect = iter([False, False]) self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) self.uut._pre_run_hook() transport.open.assert_called_once() self.command_queue.put((transport_process.CMD_TRANSPORT_CLOSE, None)) wait_for_queue_writes(self.command_queue) self.uut._do_work() transport.close.assert_called() self.uut._do_work() # Shouldn't reopen here self.uut._post_run_hook() transport.open.assert_called_once() def test_205_transport_auto_reopen_unexpected_close(self): """Test transport process reopens if it closes unexpectedly.""" transport = mock.MagicMock(spec=fake_transport.FakeTransport) transport._properties = {} transport._properties[transport_properties.AUTO_REOPEN] = True transport._transport_open = mock.MagicMock( spec=multiprocessing_utils.get_context().Event()) transport.is_open.side_effect = iter([False]) self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) self.uut._pre_run_hook() transport.open.assert_called_once() self.uut._do_work() # Should reopen here self.assertEqual( 2, transport.open.call_count, "Expected transport.open to be called {} called {}".format( 2, transport.open.call_count)) # If transport closes unexpectedly, transport.close() should be called # before re-opening the transport to clean up resources (b/183527797). 
transport.close.assert_called_once() self.uut._post_run_hook() self.assertEqual(transport.close.call_count, 2) def test_210_transport_rejects_invalid_command(self): """Test transport rejects invalid command.""" transport = fake_transport.FakeTransport() self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) self.command_queue.put(("invalid cmd", None)) wait_for_queue_writes(self.command_queue) self.uut._pre_run_hook() with self.assertRaisesRegex(RuntimeError, r"received an unknown command"): self.uut._do_work() self.uut._post_run_hook() def test_211_transport_writes_split_commands(self): """Test transport writes split commands.""" transport = mock.MagicMock(spec=fake_transport.FakeTransport) transport.read.return_value = (b"this will be a really long command that " b"will be split") self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) self.uut._pre_run_hook() long_command = "this will be a really long command that will be split" self.uut.send_command(transport_process.CMD_TRANSPORT_WRITE, long_command) wait_for_queue_writes(self.command_queue) self.uut._do_work() self.uut._do_work() self.uut._post_run_hook() self.assertEqual( 2, transport.write.call_count, "Expecting write to be called twice, found {}".format( transport.write.call_count)) def test_220_transport_can_disable_raw_data_queue(self): """Test transport can disable raw_data_queue.""" device_data1 = b"some device message\n" device_data2 = b"other device message\n" transport = mock.MagicMock(spec=fake_transport.FakeTransport) transport.read.side_effect = iter([device_data1, device_data2]) raw_data_id = 1 self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue, raw_data_queue=self.raw_data_queue, raw_data_id=raw_data_id) self.uut._pre_run_hook() self.uut.toggle_raw_data() # enable raw data for first message self.uut._do_work() # publish first raw data message self.uut.toggle_raw_data() # disable raw data for second message self.uut._do_work() # skips publishing second raw data message self.uut._post_run_hook() self.assertEqual( 1, self.raw_data_queue.qsize(), "Expected 1 entry in raw_data_queue found {}".format( self.raw_data_queue.qsize())) self.assertEqual( 2, self.log_queue.qsize(), "Expected 2 entries in log_queue found {}".format( self.log_queue.qsize())) data_id, raw_data = self.uut.get_raw_data() self.assertIsInstance( data_id, int, "Expecting int for raw data id found {}".format(type(data_id))) self.assertIsInstance(raw_data, str) self.assertEqual(raw_data_id, data_id, "Expecting {} found {}".format(raw_data_id, data_id)) self.assertEqual( device_data1.decode("utf-8", "replace"), raw_data, "Expecting {!r} found {!r}".format( device_data1.decode("utf-8", "replace"), raw_data)) def test_230_transport_invalid_command_raises_error(self): """Test transport raises exception on invalid command.""" transport = fake_transport.FakeTransport() self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) self.command_queue.put(("Invalid command", None)) wait_for_queue_writes(self.command_queue) self.uut.start() end_time = time.time() + _EXCEPTION_TIMEOUT while 
self.uut.is_running() and time.time() < end_time: time.sleep(0.001) self.assertFalse(self.uut.is_running(), "Expected process to end, still running") self.uut.stop() self.assertEqual( 1, transport.open_count.value, "Expected transport.open to be called {} called {}".format( 1, transport.open_count.value)) self.assertEqual( 1, transport.close_count.value, "Expected transport.close to be called {} called {}".format( 1, transport.close_count.value)) self.assertIsNotNone(self.exception, "Expected exception to be raised found None") self.assertIn( "received an unknown command", repr(self.exception), "Expected 'received an unknown command' found {!r}".format( self.exception)) # Clear raised exception that was handled self.exception = None def test_231_transport_read_error_raises_error(self): """Test transport raises error on read exception.""" transport = fake_transport.FakeTransport(fail_read=True) self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) self.uut.start() end_time = time.time() + _EXCEPTION_TIMEOUT while self.uut.is_running() and time.time() < end_time: time.sleep(0.001) self.assertFalse(self.uut.is_open(), "Expected transport process to be closed, found open") self.assertFalse(self.uut.is_running(), "Expected process to end, still running") self.uut.stop() self.assertEqual( 1, transport.open_count.value, "Expected transport.open to be called {} called {}".format( 1, transport.open_count.value)) self.assertEqual( 1, transport.close_count.value, "Expected transport.close to be called {} called {}".format( 1, transport.close_count.value)) self.assertIsNotNone(self.exception, "Expected exception to be raised found None") self.assertIn( fake_transport.EXCEPTION_MESSAGE, self.exception, "Expected exception message {} found {!r}".format( fake_transport.EXCEPTION_MESSAGE, self.exception)) # Clear raised exception that was handled self.exception = None @unittest.skip("Skipping test_232_transport_read_retains_partial_lines") def test_232_transport_read_retains_partial_lines(self): """Test transport read can retain partial log lines.""" transport = fake_transport.FakeTransport() self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue) device_data1 = "partial log line\r" transport.reads.put(device_data1) wait_for_queue_writes(transport.reads) self.uut._pre_run_hook() self.uut._do_work() self.assertTrue(self.log_queue.empty(), "Expected log queue to be empty") time.sleep(_LOG_MESSAGE_TIMEOUT) self.uut._do_work() self.assertFalse(self.log_queue.empty(), "Expected log queue to not be empty") self.uut._post_run_hook() @unittest.skip( "Skipping test_233_transport_read_reorders_log_interrupted_partial_lines") def test_233_transport_read_reorders_log_interrupted_partial_lines(self): """Test transport read can order partial and full lines correctly. See NEP-3223 which is a bug in the interaction of TransportProcess and LogFramer. 
""" transport = fake_transport.FakeTransport() response_start = u"response start" response_end = u" end" response_full = response_start + response_end log_line = u"my custom log line\n" log_regex = "({})".format(log_line) framer = data_framer.InterwovenLogFramer(log_regex) uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue, framer=framer) device_data1 = response_start + log_line + response_end transport.reads.put(device_data1) wait_for_queue_writes(transport.reads) uut._pre_run_hook() uut._do_work() line1 = self.log_queue.get() self.assertIn( log_line, line1, "Expected {!r} in line but found {!r}".format(log_line, line1)) self.assertTrue(self.log_queue.empty(), "Expected log queue to be empty") time.sleep(_LOG_MESSAGE_TIMEOUT) uut._do_work() line2 = self.log_queue.get() self.assertIn( response_full, line2, "Expected {!r} in line but found {!r}".format(response_full, line2)) self.assertTrue(self.log_queue.empty(), "Expected log queue to be empty") uut._post_run_hook() def test_234_transport_read_orders_lines_correctly(self): """Test transport read can order partial and full lines correctly. See NEP-3223 which is a bug in the interaction of TransportProcess and LogFramer. """ response_start = u"response start" response_end = u" end\n" response_full = response_start + response_end log_line = u"my custom log line\n" log_regex = "({})".format(log_line) device_data1 = response_start + log_line + response_end transport = mock.MagicMock(fake_transport.FakeTransport) transport.read.return_value = device_data1.encode("utf-8", "replace") framer = data_framer.InterwovenLogFramer(log_regex) self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue, framer=framer) self.uut._pre_run_hook() self.uut._do_work() line1 = self.log_queue.get() self.assertIn( log_line, line1, "Expected {!r} in line but found {!r}".format(log_line, line1)) line2 = self.log_queue.get() self.assertIn( response_full, line2, "Expected {!r} in line but found {!r}".format(response_full, line2)) self.assertTrue(self.log_queue.empty(), "Expected log queue to be empty") self.uut._post_run_hook() @unittest.skip("Skipping test_240_transport_sleeps_if_not_open") def test_240_transport_sleeps_if_not_open(self): """Test transport read can retain partial log lines.""" transport = fake_transport.FakeTransport() transport.set_property(transport_properties.OPEN_ON_START, False) read_timeout = 0.03 self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue, read_timeout=read_timeout) device_data1 = "partial log line\r" transport.reads.put(device_data1) wait_for_queue_writes(transport.reads) self.uut._pre_run_hook() start_time = time.time() self.uut._do_work() end_time = time.time() self.assertFalse(self.uut.is_open(), "Expected transport process to be closed, found open") self.uut._post_run_hook() self.assertGreater( end_time - start_time, read_timeout, "Expected transport to sleep for > {}s, found {}s".format( read_timeout, end_time - start_time)) @unittest.skip( "Skipping test_300_transport_exceeds_maximum_baudrate_required") def test_300_transport_exceeds_maximum_baudrate_required(self): """Test baud rate which exceeds the maximum baud rate of 921600.""" baudrate = 115200 * 10 
minimum_bytes_per_second = 115200 * 8 / 10 transport = fake_transport.FakeTransport( baudrate=baudrate, generate_lines=True) self.uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport, call_result_queue=self.call_result_queue, raw_data_queue=self.raw_data_queue) self.uut.toggle_raw_data() start_time = time.time() end_time = start_time + 5.0 self.uut._pre_run_hook() while time.time() < end_time: self.uut._do_work() finish_time = time.time() self.uut._post_run_hook() generated_bytes_per_second = transport.bytes_per_second.value self.assertLess( minimum_bytes_per_second, generated_bytes_per_second, "Expected generator to produce bytes > {} but found {}. " "Check load on this lab server.".format(minimum_bytes_per_second, generated_bytes_per_second)) bytes_received = 0 expected_count = 0 while not self.raw_data_queue.empty(): expected_count += 1 _, raw_data = self.raw_data_queue.get() actual_count = int(raw_data[:8]) self.assertEqual( expected_count, actual_count, "Expected line count {} found {} in {!r}".format( expected_count, actual_count, raw_data)) bytes_received += len(raw_data) actual_bytes_per_second = bytes_received / (finish_time - start_time) self.assertLess( minimum_bytes_per_second, actual_bytes_per_second, "Expected actual bytes per second > {} but found {}".format( minimum_bytes_per_second, actual_bytes_per_second)) def test_310_transport_process_open_close_commands(self): """Verify transport process processes OPEN and CLOSE commands correctly.""" transport = mock.Mock() uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport=transport, call_result_queue=self.call_result_queue) test_data = [ ((transport_process.CMD_TRANSPORT_CLOSE, None), transport.close), ((transport_process.CMD_TRANSPORT_OPEN, None), transport.open) ] for message, expected_action in test_data: with self.subTest(command=message[0]): transport.reset_mock() uut._process_command_message(message) expected_action.assert_called_once() def test_311_transport_process_write_command(self): """Verify transport process processes WRITE command correctly.""" transport = mock.Mock() uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport=transport, call_result_queue=self.call_result_queue) with mock.patch.object(transport_process, "_enqueue_command_writes") as mock_write: uut._process_command_message( (transport_process.CMD_TRANSPORT_WRITE, b"stuff")) mock_write.assert_called_once_with( mock.ANY, b"stuff", max_write_bytes=mock.ANY) def test_312_transport_process_call_command(self): """Verify transport process processes CALL command correctly.""" transport = mock.Mock() uut = transport_process.TransportProcess( "fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport=transport, call_result_queue=self.call_result_queue) transport.some_method.return_value = 123 uut._process_command_message(( transport_process.CMD_TRANSPORT_CALL, ("some_method", ("a", "b"), {"foo": "bar"}))) transport.some_method.assert_called_once_with("a", "b", foo="bar") success, return_value = self.call_result_queue.get(block=True, timeout=0.1) self.assertTrue(success) self.assertEqual(return_value, 123) def test_313_transport_process_call_command_error_handling(self): """Verify exceptions in transport methods are put into the result queue.""" transport = mock.Mock() uut = transport_process.TransportProcess( 
"fake_transport", self.exception_queue, self.command_queue, self.log_queue, transport=transport, call_result_queue=self.call_result_queue) transport.some_method.side_effect = RuntimeError("Something failed") uut._process_command_message(( transport_process.CMD_TRANSPORT_CALL, ("some_method", ("a", "b"), {"foo": "bar"}))) transport.some_method.assert_called_once_with("a", "b", foo="bar") success, error_traceback = self.call_result_queue.get( block=True, timeout=0.1) self.assertFalse(success) self.assertIn("RuntimeError: Something failed", error_traceback) def _verify_command_split(self, original_command, a_queue): count = 0 command = "" while not a_queue.empty(): count += 1 partial_command = switchboard_process.get_message(a_queue) self.assertIsInstance(partial_command, str) partial_command_len = len(partial_command) self.assertLessEqual( partial_command_len, transport_process._MAX_WRITE_BYTES, "Expected enqueued command of length {} found {}".format( transport_process._MAX_WRITE_BYTES, partial_command_len)) command += partial_command expected_count = 1 + ( len(original_command) // transport_process._MAX_WRITE_BYTES) self.assertEqual( expected_count, count, "Expected {} enqueued commands found {}".format(expected_count, count)) self.assertEqual( original_command, command, "Expected {!r} to match original command {!r}".format( command, original_command)) if __name__ == "__main__": unit_test_case.main()
import os import logging import argparse from pathlib import Path import shutil import time import copy import yaml import torch from torch.utils.tensorboard import SummaryWriter from datasets.datasets import Datasets from utils.logger import setup_logger from config.simclr_config import SimCLRConfig from models.simclr import SimCLR def setup_parser(): parser = argparse.ArgumentParser( description="Train a cnn for a classification task by fine-tuning a pretrained simclr model") parser.add_argument("config", help="path to config file") parser.add_argument("model", help="path to simcrl model file") parser.add_argument("epoch_num", help="epoch number of the pretrained simclr model") return parser def to_classification_model(simclr_model, num_classes, config): logger = logging.getLogger('test_transfer_learning') classification_model = SimCLR.get_resnet_model(config.simclr.model.resnet) logger.info('loaded classification model') num_ftrs = classification_model.fc.in_features classification_model.fc = torch.nn.Linear(num_ftrs, num_classes) logger.info('adjusted classification model. fc layer now matches number of classes: {}'.format(num_classes)) simclr_model.encoder.fc = torch.nn.Linear(num_ftrs, num_classes) logger.info('adjusted simclr model. fc layer in the encoder now matches number of classes: {}'.format(num_classes)) state_dict = simclr_model.encoder.state_dict() logger.info('extracted state dict from simclr model') classification_model.load_state_dict(state_dict) logger.info('loaded state dict into classification model') return classification_model def save_model(config, model, current_epoch): model_file_path = os.path.join(config.base.log_dir_path, "checkpoint_{}.pth".format(current_epoch)) torch.save(model.state_dict(), model_file_path) return model_file_path def load_model(config): model = SimCLR(config) model_file_path = os.path.join(config.fine_tuning.model_path, "checkpoint_{}.pth".format(config.fine_tuning.epoch_num)) model.load_state_dict(torch.load(model_file_path, map_location=config.base.device.type)) model = model.to(config.base.device) return model def train_model(model, criterion, optimizer, scheduler, dataloaders, dataset_sizes, config, num_epochs, writer): logger = logging.getLogger(config.base.logger_name) since = time.time() best_model_wts = copy.deepcopy(model.state_dict()) best_acc = 0.0 best_epoch = 0 log_every_n_steps = 100 for epoch in range(num_epochs): for phase in ['train', 'val']: if phase == 'train': model.train() else: model.eval() running_loss = 0.0 running_corrects = 0 for step, (inputs, labels) in enumerate(dataloaders[phase]): inputs = inputs.to(config.base.device) labels = labels.to(config.base.device) optimizer.zero_grad() with torch.set_grad_enabled(phase == 'train'): outputs = model(inputs) _, preds = torch.max(outputs, 1) loss = criterion(outputs, labels) if phase == 'train': loss.backward() optimizer.step() running_loss += loss.item() * inputs.size(0) running_corrects += torch.sum(preds == labels.data) if step % log_every_n_steps == 0: logger.info( "epoch [{:>4}|{:>4}] -> step [{:>5}|{:>5}] -> loss: {:.10}".format(epoch + 1, num_epochs, step + 1, len(dataloaders[phase]), loss.item())) if phase == 'train': scheduler.step() epoch_loss = running_loss / dataset_sizes[phase] epoch_acc = running_corrects.double() / dataset_sizes[phase] if epoch % config.fine_tuning.save_num_epochs == 0: save_model(config, model, epoch) writer.add_scalar("Loss/{}_epoch".format(phase), epoch_loss, epoch) writer.add_scalar("Accuracy/{}_epoch".format(phase), epoch_acc, epoch) 
logger.info('epoch [{:>4}|{:>4}] -> {}, loss: {:.10}, accuracy: {:.10}'.format( epoch + 1, num_epochs, phase, epoch_loss, epoch_acc)) if phase == 'val' and epoch_acc > best_acc: best_epoch = epoch best_acc = epoch_acc best_model_wts = copy.deepcopy(model.state_dict()) time_elapsed = time.time() - since logger.info('training complete in {:.0f}m {:.0f}s'.format( time_elapsed // 60, time_elapsed % 60)) logger.info('best validation accuracy: {:10}, epoch: {}'.format(best_acc, best_epoch)) # load best model weights model.load_state_dict(best_model_wts) return model def test_model(config, model, test_loader): logger = logging.getLogger(config.base.logger_name) model = model.to(config.base.device) model.eval() running_corrects = 0 num_test_images = len(test_loader.sampler) log_every_n_steps = 100 for step, (batch, results) in enumerate(test_loader): inputs = batch.to(config.base.device) labels = results.to(config.base.device) outputs = model(inputs) _, preds = torch.max(outputs, 1) step_corrects = torch.sum(preds == labels.data) running_corrects += step_corrects if step % log_every_n_steps == 0: logger.info("step [%5.i|%5.i] -> step_corrects: %i, running_corrects: %i" % ( step + 1, len(test_loader), step_corrects, running_corrects)) test_acc = running_corrects.double() / num_test_images logger.info('test accuracy: {}'.format(test_acc)) return test_acc def main(args): config_yaml = yaml.load(open(args.config, "r"), Loader=yaml.FullLoader) if not os.path.exists(args.config): raise FileNotFoundError('provided config file does not exist: {}'.format(args.config)) if 'restart_log_dir_path' not in config_yaml['simclr']['train'].keys(): config_yaml['simclr']['train']['restart_log_dir_path'] = None config_yaml['logger_name'] = 'classification' config = SimCLRConfig(config_yaml) if not os.path.exists(config.base.output_dir_path): os.mkdir(config.base.output_dir_path) if not os.path.exists(config.base.log_dir_path): os.makedirs(config.base.log_dir_path) logger = setup_logger(config.base.logger_name, config.base.log_file_path) logger.info('using config: {}'.format(config)) config_copy_file_path = os.path.join(config.base.log_dir_path, 'config.yaml') shutil.copy(args.config, config_copy_file_path) writer = SummaryWriter(log_dir=config.base.log_dir_path) if not os.path.exists(args.model): raise FileNotFoundError('provided model directory does not exist: %s' % args.model) else: logger.info('using model directory: {}'.format(args.model)) config.fine_tuning.model_path = args.model logger.info('using model_path: {}'.format(config.fine_tuning.model_path)) config.fine_tuning.epoch_num = args.epoch_num logger.info('using epoch_num: {}'.format(config.fine_tuning.epoch_num)) model_file_path = Path(config.fine_tuning.model_path).joinpath( 'checkpoint_' + config.fine_tuning.epoch_num + '.pth') if not os.path.exists(model_file_path): raise FileNotFoundError('model file does not exist: {}'.format(model_file_path)) else: logger.info('using model file: {}'.format(model_file_path)) train_dataset, val_dataset, test_dataset, classes = Datasets.get_datasets(config) num_classes = len(classes) train_loader, val_loader, test_loader = Datasets.get_loaders(config, train_dataset, val_dataset, test_dataset) dataloaders = { 'train': train_loader, 'val': val_loader, } dataset_sizes = { 'train': len(train_loader.sampler), 'val': len(val_loader.sampler) } simclr_model = load_model(config) logger.info('loaded simclr_model: {}'.format(config.fine_tuning.model_path)) classification_model = to_classification_model(simclr_model, num_classes, 
config) classification_model = classification_model.to(config.base.device) logger.info('created classification model from simclr model') criterion = torch.nn.CrossEntropyLoss() logger.info('created criterion') lr = config.fine_tuning.learning_rate momentum = config.fine_tuning.momentum optimizer_ft = torch.optim.SGD(classification_model.parameters(), lr=lr, momentum=momentum, nesterov=True) logger.info('created optimizer') step_size = config.fine_tuning.step_size gamma = config.fine_tuning.gamma exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_ft, step_size=step_size, gamma=gamma) logger.info('created learning rate scheduler') epochs = config.fine_tuning.epochs classification_model = train_model(classification_model, criterion, optimizer_ft, exp_lr_scheduler, dataloaders, dataset_sizes, config, epochs, writer) logger.info('completed model training') test_model(config, classification_model, test_loader) logger.info('completed model testing') trained_model_file_path = save_model(config, classification_model, epochs) logger.info('saved trained model: {}'.format(trained_model_file_path)) if __name__ == '__main__': main(setup_parser().parse_args())
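# --- Isolated sketch of the head-swap pattern used by to_classification_model above ---
# Uses a plain torchvision ResNet so it runs without the SimCLR checkpoint or the
# project config; torchvision and the class count here are assumptions for illustration.
import torch
import torchvision

num_classes = 10  # illustrative

# Start from a ResNet backbone with the usual `fc` classification head.
backbone = torchvision.models.resnet18()

# Replace the final fully connected layer so the output matches the task's classes,
# mirroring how to_classification_model resizes the head before loading the SimCLR
# encoder's state dict.
num_ftrs = backbone.fc.in_features
backbone.fc = torch.nn.Linear(num_ftrs, num_classes)

# Sanity check: a dummy batch of two RGB images now yields 2 x num_classes logits.
backbone.eval()
with torch.no_grad():
    logits = backbone(torch.randn(2, 3, 224, 224))
print(logits.shape)  # torch.Size([2, 10])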