#!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.hybrid_shape_interfaces.hybrid_shape_direction import HybridShapeDirection
from pycatia.hybrid_shape_interfaces.hybrid_shape_sweep import HybridShapeSweep
from pycatia.in_interfaces.reference import Reference
from pycatia.knowledge_interfaces.angle import Angle
from pycatia.knowledge_interfaces.length import Length
class HybridShapeSweepLine(HybridShapeSweep):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| CATGSMIDLItf.HybridShapeSweep
| HybridShapeSweepLine
|
| Represents the sweep line object.
"""
    def __init__(self, com_object):
        """
        Wrap the CATIA ``HybridShapeSweepLine`` COM object.

        :param com_object: the automation COM object representing the sweep line feature.
        """
        super().__init__(com_object)
        # Alias the COM object under a feature-specific name used by all
        # properties and methods of this class.
        self.hybrid_shape_sweep_line = com_object
@property
def angle_law(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property AngleLaw() As Reference
|
| Returns or sets the angular law.
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_sweep_line.AngleLaw)
@angle_law.setter
def angle_law(self, reference_law: Reference):
"""
:param Reference reference_law:
"""
self.hybrid_shape_sweep_line.AngleLaw = reference_law.com_object
@property
def angle_law_inversion(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property AngleLawInversion() As long
|
| Returns or sets whether the angular law has to be
| inverted.
| Legal angular law inversion values are:
| 0 The angular law has NOT to be inverted
| 1 The angular law has to be inverted
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.AngleLawInversion
@angle_law_inversion.setter
def angle_law_inversion(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.AngleLawInversion = value
@property
def angle_law_type(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property AngleLawType() As long
|
| Returns or sets the angular law type.
| Legal angular law type values are:
| 0 Undefined law type (CATGSMBasicLawType_None)
| 1 Constant law type (CATGSMBasicLawType_Constant)
| 2 Linear law type (CATGSMBasicLawType_Linear)
| 3 S law type (CATGSMBasicLawType_SType)
| 4 Law specified by a GSD law feature
| (CATGSMBasicLawType_Advanced)
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.AngleLawType
@angle_law_type.setter
def angle_law_type(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.AngleLawType = value
@property
def canonical_detection(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property CanonicalDetection() As long
|
| Returns or sets whether canonical surfaces of the swept surface are
| detected.
| Legal values:
| 0 No detection of canonical surface is performed.
| 2 Detection of canonical surfaces is performed.
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.CanonicalDetection
@canonical_detection.setter
def canonical_detection(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.CanonicalDetection = value
@property
def context(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Context() As long
|
| Returns or sets the context on Sweep feature.
|
| 0 This option creates Swept surface.
| 1 This option creates Swept volume.
|
|
| Note: Setting volume result requires GSO License.
|
| Example:
| This example retrieves in oContext the context for the Sweep hybrid
| shape feature.
|
| Dim oContext
| Set oContext = Sweep.Context
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.Context
@context.setter
def context(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.Context = value
@property
def draft_computation_mode(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property DraftComputationMode() As long
|
| Returns or sets the draft computation mode.
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.DraftComputationMode
@draft_computation_mode.setter
def draft_computation_mode(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.DraftComputationMode = value
@property
def draft_direction(self) -> HybridShapeDirection:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property DraftDirection() As HybridShapeDirection
|
| Returns or sets the draft direction.
|
| Example
| :
| This example retrieves in oDirection the direction of the LinearSweep
| feature.
|
| Dim oDirection As CATIAHybridShapeDirection
| Set oDirection = LinearSweep.DraftDirection
:return: HybridShapeDirection
:rtype: HybridShapeDirection
"""
return HybridShapeDirection(self.hybrid_shape_sweep_line.DraftDirection)
@draft_direction.setter
def draft_direction(self, direction: HybridShapeDirection):
"""
:param HybridShapeDirection direction:
"""
self.hybrid_shape_sweep_line.DraftDirection = direction.com_object
@property
def first_guide_crv(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property FirstGuideCrv() As Reference
|
| Returns or sets the sweep operation first guide curve.
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_sweep_line.FirstGuideCrv)
@first_guide_crv.setter
def first_guide_crv(self, reference_curve: Reference):
"""
:param Reference reference_curve:
"""
self.hybrid_shape_sweep_line.FirstGuideCrv = reference_curve.com_object
@property
def first_guide_surf(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property FirstGuideSurf() As Reference
|
| Returns or sets the sweep operation first guide surface.
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_sweep_line.FirstGuideSurf)
@first_guide_surf.setter
def first_guide_surf(self, reference_surface: Reference):
"""
:param Reference reference_surface:
"""
self.hybrid_shape_sweep_line.FirstGuideSurf = reference_surface.com_object
@property
def first_length_law(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property FirstLengthLaw() As Reference
|
| Returns or sets the first length law useful in some linear sweep types.
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_sweep_line.FirstLengthLaw)
@first_length_law.setter
def first_length_law(self, reference_law: Reference):
"""
:param Reference reference_law:
"""
self.hybrid_shape_sweep_line.FirstLengthLaw = reference_law.com_object
@property
def first_length_law_inversion(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property FirstLengthLawInversion() As long
|
| Returns or sets whether the first length law has to be
| inverted.
| Legal length law inversion values are:
| 0 The length law has NOT to be inverted
| 1 The length law has to be inverted
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.FirstLengthLawInversion
@first_length_law_inversion.setter
def first_length_law_inversion(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.FirstLengthLawInversion = value
@property
def guide_deviation(self) -> Length:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property GuideDeviation() As Length (Read Only)
|
| Returns the deviation value (length) from guide curves allowed during a
| sweeping operation in order to smooth it.
:return: Length
:rtype: Length
"""
return Length(self.hybrid_shape_sweep_line.GuideDeviation)
@property
def guide_deviation_activity(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property GuideDeviationActivity() As boolean
|
| Returns or sets whether a deviation from guide curves is
| allowed.
| This property gives the information on performing smoothing during sweeping
| operation.
| TRUE if a deviation from guide curves is allowed, or FALSE otherwise (FALSE
| if not specified).
:return: bool
:rtype: bool
"""
return self.hybrid_shape_sweep_line.GuideDeviationActivity
@guide_deviation_activity.setter
def guide_deviation_activity(self, value: bool):
"""
:param bool value:
"""
self.hybrid_shape_sweep_line.GuideDeviationActivity = value
@property
def mode(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Mode() As long
|
| Returns or sets the linear sweep mode.
| Legal mode values are:
| 0 Undefined linear profile swept surface
| (CATGSMLinearSweep_None)
| 1 Linear profile swept surface defined by two guide curves
| (CATGSMLinearSweep_TwoGuides)
| 2 Linear profile swept surface defined by a guide curve and an angle
| (CATGSMLinearSweep_GuideAndAngleCurve)
| 3 Linear profile swept surface defined by a guide curve and a middle curve
| (CATGSMLinearSweep_GuideAndMiddle)
| 4 Linear profile swept surface defined by a guide curve and an angle from
| a reference surface
| (CATGSMLinearSweep_GuideAndRefSurfaceAngle)
| 5 Linear profile swept surface defined by a guide curve and a tangency
| surface (CATGSMLinearSweep_GuideAndTangencySurface)
| 6 Linear profile swept surface defined by a guide curve and a draft
| directio (CATGSMLinearSweep_GuideAndDraftDirection)
| 7 Linear profile swept surface defined by two tangency surfaces
| (CATGSMLinearSweep_TwoTangencySurfaces)
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.Mode
@mode.setter
def mode(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.Mode = value
@property
def second_guide_crv(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SecondGuideCrv() As Reference
|
| Returns or sets the sweep operation second guide curve.
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_sweep_line.SecondGuideCrv)
@second_guide_crv.setter
def second_guide_crv(self, reference_curve: Reference):
"""
:param Reference reference_curve:
"""
self.hybrid_shape_sweep_line.SecondGuideCrv = reference_curve.com_object
@property
def second_guide_surf(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SecondGuideSurf() As Reference
|
| Returns or sets the sweep operation second guide surface.
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_sweep_line.SecondGuideSurf)
@second_guide_surf.setter
def second_guide_surf(self, reference_surface: Reference):
"""
:param Reference reference_surface:
"""
self.hybrid_shape_sweep_line.SecondGuideSurf = reference_surface.com_object
@property
def second_length_law(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SecondLengthLaw() As Reference
|
| Returns or sets second length law useful in some linear sweep types.
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_sweep_line.SecondLengthLaw)
@second_length_law.setter
def second_length_law(self, reference_law: Reference):
"""
:param Reference reference_law:
"""
self.hybrid_shape_sweep_line.SecondLengthLaw = reference_law.com_object
@property
def second_length_law_inversion(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SecondLengthLawInversion() As long
|
| Returns or sets whether the second length law has to be
| inverted.
| Legal length law inversion values are:
| 0 The length law has NOT to be inverted
| 1 The length law has to be inverted
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.SecondLengthLawInversion
@second_length_law_inversion.setter
def second_length_law_inversion(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.SecondLengthLawInversion = value
@property
def second_trim_option(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SecondTrimOption() As long
|
| Returns or sets the trim option for the second tangency
| surface.
|
| Legal trim option values are:
| 0 No trim computed or trim undefined
| (CATGSMSweepTrimMode_None)
| 1 Trim computed (CATGSMSweepTrimMode_On)
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.SecondTrimOption
@second_trim_option.setter
def second_trim_option(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.SecondTrimOption = value
@property
def smooth_activity(self) -> bool:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SmoothActivity() As boolean
|
| Returns whether the sweeping operation is smoothed.
| TRUE if the sweeping operation is smoothed, or FALSE otherwise (FALSE if
| not specified).
:return: bool
:rtype: bool
"""
return self.hybrid_shape_sweep_line.SmoothActivity
@smooth_activity.setter
def smooth_activity(self, value: bool):
"""
:param bool value:
"""
self.hybrid_shape_sweep_line.SmoothActivity = value
@property
def smooth_angle_threshold(self) -> Angle:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SmoothAngleThreshold() As Angle (Read Only)
|
| Returns the angular threshold.
:return: Angle
:rtype: Angle
"""
return Angle(self.hybrid_shape_sweep_line.SmoothAngleThreshold)
@property
def solution_no(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property SolutionNo() As long
|
| Returns or sets the choice number, which corresponds to each solution of a
| given linear sweep case.
| For example: a linear sweep with reference surface leads to four possible
| solutions.
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.SolutionNo
@solution_no.setter
def solution_no(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.SolutionNo = value
@property
def spine(self) -> Reference:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property Spine() As Reference
|
| Returns or sets the sweep operation spine (optional).
:return: Reference
:rtype: Reference
"""
return Reference(self.hybrid_shape_sweep_line.Spine)
@spine.setter
def spine(self, reference_spine: Reference):
"""
:param Reference reference_spine:
"""
self.hybrid_shape_sweep_line.Spine = reference_spine.com_object
@property
def trim_option(self) -> int:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| o Property TrimOption() As long
|
| Returns or sets the trim option.
|
| Legal trim option values are:
| 0 No trim computed or trim undefined
| (CATGSMSweepTrimMode_None)
| 1 Trim computed (CATGSMSweepTrimMode_On)
:return: int
:rtype: int
"""
return self.hybrid_shape_sweep_line.TrimOption
@trim_option.setter
def trim_option(self, value: int):
"""
:param int value:
"""
self.hybrid_shape_sweep_line.TrimOption = value
def add_draft_angle_definition_location(self, ip_ia_loc_elem: Reference, i_ang: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub AddDraftAngleDefinitionLocation(Reference ipIALocElem,
| double iAng)
|
| Adds a draft angle location.
|
| Parameters:
|
| ipIALocElem
| The geometric element where the draft angle applies
|
| iAng
| The draft angle
:param Reference ip_ia_loc_elem:
:param float i_ang:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.AddDraftAngleDefinitionLocation(ip_ia_loc_elem.com_object, i_ang)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'add_draft_angle_definition_location'
# # vba_code = """
# # Public Function add_draft_angle_definition_location(hybrid_shape_sweep_line)
# # Dim ipIALocElem (2)
# # hybrid_shape_sweep_line.AddDraftAngleDefinitionLocation ipIALocElem
# # add_draft_angle_definition_location = ipIALocElem
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_angle(self, i_i: int) -> Angle:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetAngle(long iI) As Angle
|
| Returns the angle values useful in some linear sweep
| types.
|
| Parameters:
|
| iI
| The angle value index
|
| Returns:
| The angle value
:param int i_i:
:return: Angle
:rtype: Angle
"""
return Angle(self.hybrid_shape_sweep_line.GetAngle(i_i))
def get_angular_law(self, op_start_ang: Angle, op_end_ang: Angle, o_law_type: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetAngularLaw(Angle opStartAng,
| Angle opEndAng,
| long oLawType)
|
| Retrieves the angular law useful in some linear sweep
| types.
|
| Parameters:
|
| opStartAng
| The angular law start value
| opEndAng
| The angular law end value
| oLawType
| The angular law type
| Legal angular law type values are:
| 0 Undefined law type (CATGSMBasicLawType_None)
| 1 Constant law type (CATGSMBasicLawType_Constant)
| 2 Linear law type (CATGSMBasicLawType_Linear)
| 3 S law type (CATGSMBasicLawType_SType)
| 4 Law specified by a GSD law feature
| (CATGSMBasicLawType_Advanced)
:param Angle op_start_ang:
:param Angle op_end_ang:
:param int o_law_type:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetAngularLaw(op_start_ang.com_object, op_end_ang.com_object, o_law_type)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_angular_law'
# # vba_code = """
# # Public Function get_angular_law(hybrid_shape_sweep_line)
# # Dim opStartAng (2)
# # hybrid_shape_sweep_line.GetAngularLaw opStartAng
# # get_angular_law = opStartAng
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_choice_nb_surfaces(self, o_surf_ori1: int, o_surf_ori2: int, o_surf_coupl_ori1: int, o_surf_coupl_ori2: int,
o_no: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetChoiceNbSurfaces(long oSurfOri1,
| long oSurfOri2,
| long oSurfCouplOri1,
| long oSurfCouplOri2,
| long oNo)
|
| Gets a sequence which identifies a solution amongst all possibilities of a
| line-profile swept surface, case
| CATGSMLinearSweep_TwoTangencySurfaces.
|
| Parameters:
|
| oSurfOri1
| This orientation determines the location of the results with regard
| to the first surface. Possible values are:
| * +1 : the result is in the semi-space defined by the normal to the surface,
| * -1 : the result is in the semi-space defined by the opposite to the normal to the
| surface,
| * 0 : no orientation is specified, all the results are output,
| * 2 : the result changes of semi-space along the spine.
| oSurfOri2
| This orientation determines the location of the results with regard
| to the second surface. Possible values are as for oSurfOri1.
|
| oSurfCouplOri1
| This orientation determines the location of the results with regard
| to the trihedron defined by the the spine, the normal to the first surface and
| the tangent to the linear profile. Possible values
| are:
| * +1 : the output results are such that the triedron is counter clockwise,
| * -1 : the output results are such that the triedron is clockwise,
| * 0 : no orientation is specified, all the results are output,
| * 2 : the orientation of the trihedron changes along the spine.
| oSurfCouplOri2
| This orientation determines the location of the results with regard
| to the trihedron defined by the the spine, the normal to the second surface and
| the tangent to the linear profile. Possible values are as for oSurfCouplOri1.
|
| oNo
| Given the previous orientations, solution number in a distance
| ordered list.
:param int o_surf_ori1:
:param int o_surf_ori2:
:param int o_surf_coupl_ori1:
:param int o_surf_coupl_ori2:
:param int o_no:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetChoiceNbSurfaces(o_surf_ori1, o_surf_ori2, o_surf_coupl_ori1,
o_surf_coupl_ori2, o_no)
def get_choice_no(self, o_val1: int, o_val2: int, o_val3: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetChoiceNo(long oVal1,
| long oVal2,
| long oVal3)
|
| Retrieves the choice number associated with each solution of a given linear
| sweep case.
| Example: a linear sweep with one guide curve and a tangency surface may
| lead to several possible solutions.
|
| Parameters:
|
| oVal1
| The solution number (from 1 to n)
| oVal2
| In the example, the shell orientation : -1, +1 or 0 (both +1 and -1)
| val3
| In the example, the wire orientation : -1, +1 or 0 (both +1 and -1)
:param int o_val1:
:param int o_val2:
:param int o_val3:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetChoiceNo(o_val1, o_val2, o_val3)
def get_draft_angle_definition_location(self, i_loc: int, op_ia_element: Reference, o_angle: Angle) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetDraftAngleDefinitionLocation(long iLoc,
| Reference opIAElement,
| Angle oAngle)
|
| Retrieves the draft angle location element.
|
| Parameters:
|
| iLoc
| The draft angle location position in the list
| opIAElement
| The geometric element at that location and where the draft angle
| applies
| oAngle
| The draft angle
:param int i_loc:
:param Reference op_ia_element:
:param Angle o_angle:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetDraftAngleDefinitionLocation(i_loc, op_ia_element.com_object,
o_angle.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_draft_angle_definition_location'
# # vba_code = """
# # Public Function get_draft_angle_definition_location(hybrid_shape_sweep_line)
# # Dim iLoc (2)
# # hybrid_shape_sweep_line.GetDraftAngleDefinitionLocation iLoc
# # get_draft_angle_definition_location = iLoc
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_draft_angle_definition_locations_nb(self, o_count: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetDraftAngleDefinitionLocationsNb(long oCount)
|
| Retrieves the draft angle location list size.
|
| Parameters:
|
| oCount
| The draft angle location list size
:param int o_count:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetDraftAngleDefinitionLocationsNb(o_count)
def get_first_length_definition_type(self, o_first_type: int, op_ia_elem: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetFirstLengthDefinitionType(long oFirstType,
| Reference opIAElem)
|
| Retrieves the first length definition type.
|
| Parameters:
|
| oFirstType
| The first length definition type
| Legal length definition types are:
| 0 Undefined length type
| (CATGSMLinearSweepLengthType_None)
| 1 Length of the swept line in the sweeping plane from the guide
| curve (CATGSMLinearSweepLengthType_Standard)
| 2 No numerical value is required, equivalent to standard length at
| zero (CATGSMLinearSweepLengthType_FromCurve)
| 3 Up to or from a geometrical reference (a surface)
| (CATGSMLinearSweepLengthType_Reference)
| 4 Only for draft surfaces, the length is computed in the draft
| direction from an extremum point on the guide curve
| (CATGSMLinearSweepLengthType_FromExtremum)
| 5 Only for draft surfaces, the length will be used in a way
| similar to euclidean parallel curve distance on the swept surface
| (CATGSMLinearSweepLengthType_AlongSurface)
| opIAElem
| The geometric element where the first length definition type
| applies
:param int o_first_type:
:param Reference op_ia_elem:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetFirstLengthDefinitionType(o_first_type, op_ia_elem.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_first_length_definition_type'
# # vba_code = """
# # Public Function get_first_length_definition_type(hybrid_shape_sweep_line)
# # Dim oFirstType (2)
# # hybrid_shape_sweep_line.GetFirstLengthDefinitionType oFirstType
# # get_first_length_definition_type = oFirstType
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_first_length_law(self, o_length1: Length, o_length2: Length, o_law_type: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetFirstLengthLaw(Length oLength1,
| Length oLength2,
| long oLawType)
|
| Retrieves the first length law useful in some linear sweep
| types.
|
| Parameters:
|
| oLength1
| The length law start value
| oLength2
| The length law end value
| oLawType
| The length law type
| Legal length law type values are:
| 0 Undefined law type (CATGSMBasicLawType_None)
| 1 Constant law type (CATGSMBasicLawType_Constant)
| 2 Linear law type (CATGSMBasicLawType_Linear)
| 3 S law type (CATGSMBasicLawType_SType)
| 4 Law specified by a GSD law feature
| (CATGSMBasicLawType_Advanced)
:param Length o_length1:
:param Length o_length2:
:param int o_law_type:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetFirstLengthLaw(o_length1.com_object, o_length2.com_object, o_law_type)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_first_length_law'
# # vba_code = """
# # Public Function get_first_length_law(hybrid_shape_sweep_line)
# # Dim oLength1 (2)
# # hybrid_shape_sweep_line.GetFirstLengthLaw oLength1
# # get_first_length_law = oLength1
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_length(self, i_i: int) -> Length:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Func GetLength(long iI) As Length
|
| Returns the length values useful in some linear sweep
| types.
|
| Parameters:
|
| iI
| The length value index
|
| Returns:
| The length value
:param int i_i:
:return: Length
:rtype: Length
"""
return Length(self.hybrid_shape_sweep_line.GetLength(i_i))
def get_length_law_types(self, o_first_type: int, o_second_type: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetLengthLawTypes(long oFirstType,
| long oSecondType)
|
| Gets length law types.
|
| Parameters:
|
| oFirstType
| First type of law.
| oSecondType
| Second type of law. oFirstType and oSecondType
| 0 : Undefined law type
| 1 : Constant law type
| 2 : Linear law type
| 3 : S law type
| 4 : Law specified by a GSD law feature
| 5 : Law specified by a set of points and parameters
:param int o_first_type:
:param int o_second_type:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetLengthLawTypes(o_first_type, o_second_type)
def get_longitudinal_relimiters(self, op_ia_elem1: Reference, op_ia_elem2: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetLongitudinalRelimiters(Reference opIAElem1,
| Reference opIAElem2)
|
| Deprecated:
| V5R16 CATHybridShapeSweepLine#GetRelimiters Retrieves the elements
| relimiting the spine (or the default spine).
| Parameters:
|
| opIAElem1
| The first relimiting feature (plane or point)
| opIAElem2
| The second relimiting feature (plane or point)
:param Reference op_ia_elem1:
:param Reference op_ia_elem2:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetLongitudinalRelimiters(op_ia_elem1.com_object, op_ia_elem2.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_longitudinal_relimiters'
# # vba_code = """
# # Public Function get_longitudinal_relimiters(hybrid_shape_sweep_line)
# # Dim opIAElem1 (2)
# # hybrid_shape_sweep_line.GetLongitudinalRelimiters opIAElem1
# # get_longitudinal_relimiters = opIAElem1
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_nb_angle(self, o_ang: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetNbAngle(long oAng)
|
| Retrieves the number of angles.
|
| Parameters:
|
| oAng
| The number of angles
:param int o_ang:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetNbAngle(o_ang)
def get_nb_guide_crv(self, o_num: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetNbGuideCrv(long oNum)
|
| Retrieves the number of guides curves.
|
| Parameters:
|
| oNum
| The number of guide curves
:param int o_num:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetNbGuideCrv(o_num)
def get_nb_guide_sur(self, o_num: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetNbGuideSur(long oNum)
|
| Retrieves the number of guide surfaces.
|
| Parameters:
|
| oNum
| The number of guides surfaces
:param int o_num:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetNbGuideSur(o_num)
def get_nb_length(self, o_len: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetNbLength(long oLen)
|
| Retrieves the number of lengths.
|
| Parameters:
|
| oLen
| The number of lengths
:param int o_len:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetNbLength(o_len)
def get_relimiters(self, op_ia_elem1: Reference, op_orient1: int, op_ia_elem2: Reference, op_orient2: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetRelimiters(Reference opIAElem1,
| long opOrient1,
| Reference opIAElem2,
| long opOrient2)
|
| Retrieves the elements relimiting the spine (or the default
| spine).
|
| Parameters:
|
| opIAElem1
| The first relimiting feature (plane or point)
| opOrient1
| Split direction for the first relimitation
| 0 means that the beginning of the spine (considering its
| orientation) is removed, 1 means that the end of the spine is removed
|
| opIAElem2
| The second relimiting feature (plane or point)
| opOrient2
| Split direction for the second relimitation
:param Reference op_ia_elem1:
:param int op_orient1:
:param Reference op_ia_elem2:
:param int op_orient2:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetRelimiters(op_ia_elem1.com_object, op_orient1, op_ia_elem2.com_object,
op_orient2)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_relimiters'
# # vba_code = """
# # Public Function get_relimiters(hybrid_shape_sweep_line)
# # Dim opIAElem1 (2)
# # hybrid_shape_sweep_line.GetRelimiters opIAElem1
# # get_relimiters = opIAElem1
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_second_length_definition_type(self, o_second_type: int, op_ia_elem: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetSecondLengthDefinitionType(long oSecondType,
| Reference opIAElem)
|
| Retrieves the second length definition type.
|
| Parameters:
|
| oSecondType
| The second length definition type
| Legal length definition types are:
| 0 Undefined length type
| (CATGSMLinearSweepLengthType_None)
| 1 Length of the swept line in the sweeping plane from the guide
| curve (CATGSMLinearSweepLengthType_Standard)
| 2 No numerical value is required, equivalent to standard length at
| zero (CATGSMLinearSweepLengthType_FromCurve)
| 3 Up to or from a geometrical reference (a surface)
| (CATGSMLinearSweepLengthType_Reference)
| 4 Only for draft surfaces, the length is computed in the draft
| direction from an extremum point on the guide curve
| (CATGSMLinearSweepLengthType_FromExtremum)
| 5 Only for draft surfaces, the length will be used in a way
| similar to euclidean parallel curve distance on the swept surface
| (CATGSMLinearSweepLengthType_AlongSurface)
| opIAElem
| The geometric element where the second length definition type
| applies
:param int o_second_type:
:param Reference op_ia_elem:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetSecondLengthDefinitionType(o_second_type, op_ia_elem.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_second_length_definition_type'
# # vba_code = """
# # Public Function get_second_length_definition_type(hybrid_shape_sweep_line)
# # Dim oSecondType (2)
# # hybrid_shape_sweep_line.GetSecondLengthDefinitionType oSecondType
# # get_second_length_definition_type = oSecondType
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def get_second_length_law(self, o_length1: Length, o_length2: Length, o_law_type: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetSecondLengthLaw(Length oLength1,
| Length oLength2,
| long oLawType)
|
| Retrieves the second length law useful in some linear sweep
| types.
|
| Parameters:
|
| oLength1
| The length law start value
| oLength2
| The length law end value
| oLawType
| The length law type
| Legal length law type values are:
| 0 Undefined law type (CATGSMBasicLawType_None)
| 1 Constant law type (CATGSMBasicLawType_Constant)
| 2 Linear law type (CATGSMBasicLawType_Linear)
| 3 S law type (CATGSMBasicLawType_SType)
| 4 Law specified by a GSD law feature
| (CATGSMBasicLawType_Advanced)
:param Length o_length1:
:param Length o_length2:
:param int o_law_type:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.GetSecondLengthLaw(o_length1.com_object, o_length2.com_object, o_law_type)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'get_second_length_law'
# # vba_code = """
# # Public Function get_second_length_law(hybrid_shape_sweep_line)
# # Dim oLength1 (2)
# # hybrid_shape_sweep_line.GetSecondLengthLaw oLength1
# # get_second_length_law = oLength1
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def insert_draft_angle_definition_location(self, i_elem: Reference, i_angle: Angle, i_pos: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub InsertDraftAngleDefinitionLocation(Reference iElem,
| Angle iAngle,
| long iPos)
|
| Inserts a geometrical element and a value necessary for draft angle
| definition after a given position in the lists.
|
| Parameters:
|
| iElem
| Geometrical element
| iAngle
| Angular parameter
| iPos
| Position in lists. To insert in the beginning of the list put iPos = 0.
:param Reference i_elem:
:param Angle i_angle:
:param int i_pos:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.InsertDraftAngleDefinitionLocation(i_elem.com_object, i_angle.com_object,
i_pos)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'insert_draft_angle_definition_location'
# # vba_code = """
# # Public Function insert_draft_angle_definition_location(hybrid_shape_sweep_line)
# # Dim iElem (2)
# # hybrid_shape_sweep_line.InsertDraftAngleDefinitionLocation iElem
# # insert_draft_angle_definition_location = iElem
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def remove_all_draft_angle_definition_locations(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub RemoveAllDraftAngleDefinitionLocations()
|
| Removes all geometrical elements and values necessary for draft angle
| definition.
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.RemoveAllDraftAngleDefinitionLocations()
def remove_angle(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub RemoveAngle()
|
| Removes an angle.
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.RemoveAngle()
def remove_draft_angle_definition_location_position(self, i_pos: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub RemoveDraftAngleDefinitionLocationPosition(long iPos)
|
| Removes a draft angle location.
|
| Parameters:
|
| iPos
| The position in the list of the draft angle location to
| remove
:param int i_pos:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.RemoveDraftAngleDefinitionLocationPosition(i_pos)
def remove_guide_crv(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub RemoveGuideCrv()
|
| Removes a guide curve.
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.RemoveGuideCrv()
def remove_guide_sur(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub RemoveGuideSur()
|
| Removes a guide surface.
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.RemoveGuideSur()
def remove_length(self) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub RemoveLength()
|
| Removes a length.
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.RemoveLength()
def set_angle(self, i_i: int, i_elem: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetAngle(long iI,
| double iElem)
|
| Sets the angle values useful in some linear sweep types.
|
| Parameters:
|
| iI
| The angle value index
| iElem
| The angle value
:param int i_i:
:param float i_elem:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetAngle(i_i, i_elem)
def set_angular_law(self, i_start_ang: float, i_end_ang: float, i_law_type: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetAngularLaw(double iStartAng,
| double iEndAng,
| long iLawType)
|
| Sets the angular law useful in some linear sweep types.
|
| Parameters:
|
| iStartAng
| The angular law start value
| iEndAng
| The angular law end value
| iLawType
| The angular law type
| Legal angular law type values are:
| 0 Undefined law type (CATGSMBasicLawType_None)
| 1 Constant law type (CATGSMBasicLawType_Constant)
| 2 Linear law type (CATGSMBasicLawType_Linear)
| 3 S law type (CATGSMBasicLawType_SType)
| 4 Law specified by a GSD law feature
| (CATGSMBasicLawType_Advanced)
:param float i_start_ang:
:param float i_end_ang:
:param int i_law_type:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetAngularLaw(i_start_ang, i_end_ang, i_law_type)
def set_choice_nb_surfaces(self, i_surf_ori1: int, i_surf_ori2: int, i_surf_coupl_ori1: int, i_surf_coupl_ori2: int,
i_no: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetChoiceNbSurfaces(long iSurfOri1,
| long iSurfOri2,
| long iSurfCouplOri1,
| long iSurfCouplOri2,
| long iNo)
|
| Sets a sequence which identifies a solution amongst all possibilities of a
| line-profile swept surface, case
| CATGSMLinearSweep_TwoTangencySurfaces.
|
| Parameters:
|
| iSurfOri1
| This orientation determines the location of the results with regard
| to the first surface. Possible values are:
| * +1 : the result is in the semi-space defined by the normal to the surface,
| * -1 : the result is in the semi-space defined by the opposite to the normal to the ,
| * 0 : no orientation is specified, all the results are output,
| * 2 : the result changes of semi-space along the spine.
| iSurfOri2
| This orientation determines the location of the results with regard
| to the second surface. Possible values are as for iSurfOri1.
|
| iSurfCouplOri1
| This orientation determines the location of the results with regard
| to the trihedron defined by the the spine, the normal to the first surface and
| the tangent to the linear profile. Possible values
| are:
| * +1 : the output results are such that the triedron is counter clockwise,
| * -1 : the output results are such that the triedron is clockwise,
| * 0 : no orientation is specified, all the results are output,
| * 2 : the orientation of the trihedron changes along the spine.
| iSurfCouplOri2
| This orientation determines the location of the results with regard
| to the trihedron defined by the the spine, the normal to the second surface and
| the tangent to the linear profile. Possible values are as for iSurfCouplOri2.
|
| iNo
| Given the previous orientations, solution number in a distance
| ordered list.
:param int i_surf_ori1:
:param int i_surf_ori2:
:param int i_surf_coupl_ori1:
:param int i_surf_coupl_ori2:
:param int i_no:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetChoiceNbSurfaces(i_surf_ori1, i_surf_ori2, i_surf_coupl_ori1,
i_surf_coupl_ori2, i_no)
def set_choice_no(self, i_val1: int, i_val2: int, i_val3: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetChoiceNo(long iVal1,
| long iVal2,
| long iVal3)
|
| Sets the choice number associated with each solution of a given linear
| sweep case.
| Example: a linear sweep with one guide curve and a tangency surface may
| lead to several possible solutions.
|
| Parameters:
|
| iVal1
| The solution number (from 1 to n)
| iVal2
| In the example, the shell orientation : -1, +1 or 0 (both +1 and -1)
| iVal3
| In the example, the wire orientation : -1, +1 or 0 (both +1 and -1)
:param int i_val1:
:param int i_val2:
:param int i_val3:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetChoiceNo(i_val1, i_val2, i_val3)
def set_first_length_definition_type(self, i_first_type: int, ip_ia_elem: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetFirstLengthDefinitionType(long iFirstType,
| Reference ipIAElem)
|
| Sets the first length definition type.
|
| Parameters:
|
| iFirstType
| The first length definition type
| Legal length definition types are:
| 0 Undefined length type
| (CATGSMLinearSweepLengthType_None)
| 1 Length of the swept line in the sweeping plane from the guide
| curve (CATGSMLinearSweepLengthType_Standard)
| 2 No numerical value is required, equivalent to standard length at
| zero (CATGSMLinearSweepLengthType_FromCurve)
| 3 Up to or from a geometrical reference (a surface)
| (CATGSMLinearSweepLengthType_Reference)
| 4 Only for draft surfaces, the length is computed in the draft
| direction from an extremum point on the guide curve
| (CATGSMLinearSweepLengthType_FromExtremum)
| 5 Only for draft surfaces, the length will be used in a way
| similar to euclidean parallel curve distance on the swept surface
| (CATGSMLinearSweepLengthType_AlongSurface)
| ipIAElem
| The geometric element where the first length definition type
| applies
:param int i_first_type:
:param Reference ip_ia_elem:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetFirstLengthDefinitionType(i_first_type, ip_ia_elem.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'set_first_length_definition_type'
# # vba_code = """
# # Public Function set_first_length_definition_type(hybrid_shape_sweep_line)
# # Dim iFirstType (2)
# # hybrid_shape_sweep_line.SetFirstLengthDefinitionType iFirstType
# # set_first_length_definition_type = iFirstType
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def set_first_length_law(self, i_length1: float, i_length2: float, i_law_type: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetFirstLengthLaw(double iLength1,
| double iLength2,
| long iLawType)
|
| Sets the first length law useful in some linear sweep
| types.
|
| Parameters:
|
| iLength1
| The length law start value
| iLength2
| The length law end value
| iLawType
| The length law type
| Legal length law type values are:
| 0 Undefined law type (CATGSMBasicLawType_None)
| 1 Constant law type (CATGSMBasicLawType_Constant)
| 2 Linear law type (CATGSMBasicLawType_Linear)
| 3 S law type (CATGSMBasicLawType_SType)
| 4 Law specified by a GSD law feature
| (CATGSMBasicLawType_Advanced)
:param float i_length1:
:param float i_length2:
:param int i_law_type:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetFirstLengthLaw(i_length1, i_length2, i_law_type)
def set_guide_deviation(self, i_length: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetGuideDeviation(double iLength)
|
| Sets the deviation value (length) from guide curves allowed during sweeping
| operation in order to smooth it.
|
| Parameters:
|
| iLength
| The deviation value
:param float i_length:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetGuideDeviation(i_length)
def set_length(self, i_i: int, i_elem: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetLength(long iI,
| double iElem)
|
| Sets the linear values useful in some linear sweep types.
|
| Parameters:
|
| iI
| The linear value index
| iElem
| The linear value
:param int i_i:
:param float i_elem:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetLength(i_i, i_elem)
def set_length_law_types(self, i_first_type: int, i_second_type: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetLengthLawTypes(long iFirstType,
| long iSecondType)
|
| Sets length law types.
|
| Parameters:
|
| iFirstType
| First type of law.
| iSecondType
| Second type of law. iFirstType and iSecondType
| 0 : Undefined law type
| 1 : Constant law type
| 2 : Linear law type
| 3 : S law type
| 4 : Law specified by a GSD law feature
| 5 : Law specified by a set of points and parameters
:param int i_first_type:
:param int i_second_type:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetLengthLawTypes(i_first_type, i_second_type)
def set_longitudinal_relimiters(self, ip_ia_elem1: Reference, ip_ia_elem2: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetLongitudinalRelimiters(Reference ipIAElem1,
| Reference ipIAElem2)
|
| Deprecated:
| V5R16 CATHybridShapeSweepLine#SetRelimiters Sets the elements
| relimiting the spine (or the default spine).
| Parameters:
|
| ipIAElem1
| The first relimiting feature (plane or point)
| ipIAElem2
| The second relimiting feature (plane or point)
:param Reference ip_ia_elem1:
:param Reference ip_ia_elem2:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetLongitudinalRelimiters(ip_ia_elem1.com_object, ip_ia_elem2.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'set_longitudinal_relimiters'
# # vba_code = """
# # Public Function set_longitudinal_relimiters(hybrid_shape_sweep_line)
# # Dim ipIAElem1 (2)
# # hybrid_shape_sweep_line.SetLongitudinalRelimiters ipIAElem1
# # set_longitudinal_relimiters = ipIAElem1
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def set_relimiters(self, ip_ia_elem1: Reference, ip_orient1: int, ip_ia_elem2: Reference, ip_orient2: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetRelimiters(Reference ipIAElem1,
| long ipOrient1,
| Reference ipIAElem2,
| long ipOrient2)
|
| Sets the elements relimiting the spine (or the default
| spine).
|
| Parameters:
|
| ipIAElem1
| The first relimiting feature (plane or point)
| ipOrient1
| Split direction for the first relimitation
| 0 means that the beginning of the spine (considering its
| orientation) is removed, 1 means that the end of the spine is removed
|
| ipIAElem2
| The second relimiting feature (plane or point)
| ipOrient2
| Split direction for the second relimitation
:param Reference ip_ia_elem1:
:param int ip_orient1:
:param Reference ip_ia_elem2:
:param int ip_orient2:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetRelimiters(ip_ia_elem1.com_object, ip_orient1, ip_ia_elem2.com_object,
ip_orient2)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'set_relimiters'
# # vba_code = """
# # Public Function set_relimiters(hybrid_shape_sweep_line)
# # Dim ipIAElem1 (2)
# # hybrid_shape_sweep_line.SetRelimiters ipIAElem1
# # set_relimiters = ipIAElem1
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def set_second_length_definition_type(self, i_second_type: int, ip_ia_elem: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetSecondLengthDefinitionType(long iSecondType,
| Reference ipIAElem)
|
| Sets the second length definition type.
|
| Parameters:
|
| iSecondType
| The second length definition type
| Legal length definition types are:
| 0 Undefined length type
| (CATGSMLinearSweepLengthType_None)
| 1 Length of the swept line in the sweeping plane from the guide
| curve (CATGSMLinearSweepLengthType_Standard)
| 2 No numerical value is required, equivalent to standard length at
| zero (CATGSMLinearSweepLengthType_FromCurve)
| 3 Up to or from a geometrical reference (a surface)
| (CATGSMLinearSweepLengthType_Reference)
| 4 Only for draft surfaces, the length is computed in the draft
| direction from an extremum point on the guide curve
| (CATGSMLinearSweepLengthType_FromExtremum)
| 5 Only for draft surfaces, the length will be used in a way
| similar to euclidean parallel curve distance on the swept surface
| (CATGSMLinearSweepLengthType_AlongSurface)
| ipIAElem
| The geometric element where the second length definition type
| applies
:param int i_second_type:
:param Reference ip_ia_elem:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetSecondLengthDefinitionType(i_second_type, ip_ia_elem.com_object)
# # # # Autogenerated comment:
# # some methods require a system service call as the methods expects a vb array object
# # passed to it and there is no way to do this directly with python. In those cases the following code
# # should be uncommented and edited accordingly. Otherwise completely remove all this.
# # vba_function_name = 'set_second_length_definition_type'
# # vba_code = """
# # Public Function set_second_length_definition_type(hybrid_shape_sweep_line)
# # Dim iSecondType (2)
# # hybrid_shape_sweep_line.SetSecondLengthDefinitionType iSecondType
# # set_second_length_definition_type = iSecondType
# # End Function
# # """
# # system_service = self.application.system_service
# # return system_service.evaluate(vba_code, 0, vba_function_name, [self.com_object])
def set_second_length_law(self, i_length1: float, i_length2: float, i_law_type: int) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetSecondLengthLaw(double iLength1,
| double iLength2,
| long iLawType)
|
| Sets the second length law useful in some linear sweep
| types.
|
| Parameters:
|
| iLength1
| The length law start value
| iLength2
| The length law end value
| iLawType
| The length law type
| Legal length law type values are:
| 0 Undefined law type (CATGSMBasicLawType_None)
| 1 Constant law type (CATGSMBasicLawType_Constant)
| 2 Linear law type (CATGSMBasicLawType_Linear)
| 3 S law type (CATGSMBasicLawType_SType)
| 4 Law specified by a GSD law feature
| (CATGSMBasicLawType_Advanced)
:param float i_length1:
:param float i_length2:
:param int i_law_type:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetSecondLengthLaw(i_length1, i_length2, i_law_type)
def set_smooth_angle_threshold(self, i_angle: float) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub SetSmoothAngleThreshold(double iAngle)
|
| Sets the angular threshold.
|
| Parameters:
|
| iAngle
| The angle numerical value
:param float i_angle:
:return: None
:rtype: None
"""
return self.hybrid_shape_sweep_line.SetSmoothAngleThreshold(i_angle)
def __repr__(self):
return f'HybridShapeSweepLine(name="{self.name}")'
|
import numpy as np
import glob
import os,sys
from skimage import io
from sklearn.model_selection import train_test_split
from tensorflow.keras.optimizers import SGD
from util import preprocess_image, create_model
def get_label_from_image_path(image_path, data_path):
    """
    Extract the integer class label from an image path.

    Expects ``image_path`` to start with ``data_path`` followed by the
    class directory, e.g. ``<data_path>/00012/00000_00001.ppm`` -> 12.

    :param str image_path: full path of the image file
    :param str data_path: dataset root prefix to strip
    :return: integer class label
    """
    relative = image_path.replace(data_path, "")
    # First path component after the dataset root is the class id.
    # Normalising the separator keeps this working when glob returns
    # backslash-separated paths (Windows) as well as POSIX paths.
    parts = relative.replace(os.sep, "/").split("/")
    return int(parts[0])
def get_training_data(data_path, num_classes, img_size):
    """
    Load and preprocess every GTSRB training image under *data_path*.

    Expects the layout ``<data_path>/<class_id>/<image>.ppm``. Images are
    shuffled, preprocessed via ``preprocess_image`` and labelled from their
    parent directory name; unreadable files are skipped with a message.

    :param str data_path: root folder containing one sub-directory per class
    :param int num_classes: number of classes (width of the one-hot labels)
    :param int img_size: target size passed to ``preprocess_image``
    :return: tuple ``(X, y)`` — X float32 array of images, y uint8 one-hot labels
    """
    images = []
    labels = []
    all_image_paths = glob.glob(os.path.join(data_path, '*/*.ppm'))
    np.random.shuffle(all_image_paths)
    print(data_path)
    # enumerate() replaces the hand-rolled counter; i now counts attempts
    # (including failed loads), which is the intended progress indicator.
    for i, image_path in enumerate(all_image_paths):
        try:
            img = preprocess_image(io.imread(image_path), img_size)
            label = get_label_from_image_path(image_path, data_path)
            images.append(img)
            labels.append(label)
            print("load images: {}".format(i))
        except (IOError, OSError):
            print("failed to process {}".format(image_path))
    X = np.array(images, dtype='float32')
    # Index rows of the identity matrix to one-hot encode the labels.
    y = np.eye(num_classes, dtype='uint8')[labels]
    return X, y
# Training configuration for the GTSRB traffic-sign classifier.
NUM_CLASSES = 43
IMG_SIZE = 48
TRAINING_DATA_PATH = "./GTSRB/Final_Training/Images/"

model = create_model(NUM_CLASSES, IMG_SIZE)
X, y = get_training_data(TRAINING_DATA_PATH, NUM_CLASSES, IMG_SIZE)

# Use the `learning_rate` keyword: the old `lr` alias is deprecated and
# removed in recent Keras releases. NOTE(review): `decay` is also deprecated
# in newer Keras versions — confirm the installed TF/Keras version before
# upgrading it to a learning-rate schedule.
learning_rate = 0.01
sgd = SGD(learning_rate=learning_rate, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd,
              metrics=['accuracy'])

batch_size = 32
epochs = 30
# 20% of the (already shuffled) data is held out for validation.
history = model.fit(X, y,
                    batch_size=batch_size,
                    epochs=epochs,
                    validation_split=0.2,
                    shuffle=True)

# Output model path is taken from the command line: python <script> <model_path>
model.save(sys.argv[1])
|
# multiply a list by a number
# multiply a list by a number
def mul(row, num):
    """Return a new row with every element of *row* scaled by *num*."""
    scaled = []
    for value in row:
        scaled.append(value * num)
    return scaled
# subtract one row from another
# subtract one row from another
def sub(row_left, row_right):
    """Return the element-wise difference row_left - row_right."""
    result = []
    for left, right in zip(row_left, row_right):
        result.append(left - right)
    return result
# calculate the row echelon form of the matrix
# calculate the row echelon form of the matrix
def echelonify(rw, i, m):
    """
    Eliminate column *i* from every row of *m* below row *i*, using *rw*
    (row *i*) as the pivot row. Mutates *m* in place and returns *rw*.
    Rows are left unchanged when the pivot ``rw[i]`` is zero.
    """
    for below, row in enumerate(m[(i + 1):], start=i + 1):
        if rw[i] != 0:
            factor = row[i] / rw[i]
            # row - factor * rw, done element-wise (inlined sub/mul helpers)
            m[below] = [a - b * factor for a, b in zip(row, rw)]
    return rw
def row_echelon(m):
    """
    Reduce matrix *m* (list of rows) to row echelon form without pivoting,
    then snap values within 1e-10 of zero to exactly 0.
    """
    for pivot in range(len(m)):  # len(m) == number of rows
        echelonify(m[pivot], pivot, m)
    eps = 0.0000000001
    # replace near-zero numerical noise with exact zeros
    return [[0 if (eps > x > -eps) else x for x in row] for row in m]
if __name__ == '__main__':
    print("Enter number of rows and columns")
    num_rows, num_cols = map(int, input().split())  # rows then columns
    matrix = []
    for _ in range(num_rows):
        # read one row, truncated to the declared number of columns
        matrix.append(list(map(int, input().split()))[:num_cols])
    reduced = row_echelon(matrix)
    for reduced_row in reduced:
        print(' '.join(str(value) for value in reduced_row))
    # To get leading 1s, divide each row by its first non-zero element.
|
import os.path
import urllib
import requests
import time
from bs4 import BeautifulSoup
# Target site (punycode domain) and the Naver blog id to look for in results.
SITE_URL = "http://www.xn--od1bu1t7pcgwb.net/"
BLOG_ID = 'inja0391'
# Seconds to wait between queries, to avoid hammering the search engine.
DELAY = 2
# Report section headers (Korean: "<keyword> - blog search" / "integrated search").
BLOG_KEYWORD_PRINT_FORMAT = """------------------------------------
{} 키워드 - 블로그검색
------------------------------------"""
INTEGRATED_KEYWORD_PRINT_FORMAT = """------------------------------------
{} 키워드 - 통합검색
------------------------------------"""
# Per-hit report lines: rank and title (Korean: RANK / title).
BLOG_RESULT_PRINT_FORMAT = """
RANK : {}등
제목 : {}
"""
SITE_RESULT_PRINT_FORMAT = """
RANK : {}등
제목 : {} - 사이트
"""
# Korean: "no result found within page 1".
THERE_IS_NO_RESULT = "1페이지 내에 결과 값이 없습니다."
def blog_search(blog_id, query):
    """
    Search Naver mobile blog results for *query* and report every hit on the
    first result page whose link contains *blog_id*.

    :param str blog_id: blog identifier to look for in result links
    :param str query: search keyword
    :return: formatted report string
    """
    search_url = "https://m.search.naver.com/search.naver?where=m_blog&sm=mtb_jum&query={}".format(
        query)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Android; Mobile; rv:13.0) Gecko/13.0 Firefox/13.0'}
    soup = BeautifulSoup(requests.get(search_url, headers=headers).text, 'html.parser')
    result_list = soup.find(class_='lst_total')
    blogs = result_list.find_all('a')
    titles = result_list.find_all(class_='total_tit')
    report = BLOG_KEYWORD_PRINT_FORMAT.format(query)
    hits = 0
    for index, blog in enumerate(blogs):
        if blog_id in blog['href']:
            hits += 1
            report += BLOG_RESULT_PRINT_FORMAT.format(index + 1, titles[index].text)
    if hits == 0:
        report += THERE_IS_NO_RESULT
    return report
def integrated_search(site_url, blog_id, query):
    """
    Search Naver mobile integrated results for *query* and report every hit
    on the first result page whose link contains *site_url* or *blog_id*.

    :param str site_url: site URL to look for in result links
    :param str blog_id: blog identifier to look for in result links
    :param str query: search keyword
    :return: formatted report string
    """
    search_url = "https://m.search.naver.com/search.naver?query={}&where=m&sm=mtp_hty".format(
        query)
    headers = {
        'User-Agent': 'Mozilla/5.0 (Android; Mobile; rv:13.0) Gecko/13.0 Firefox/13.0'}
    soup = BeautifulSoup(requests.get(search_url, headers=headers).text, 'html.parser')
    results = soup.find(class_='sp_ntotal').find(
        'ul', class_='lst_total').find_all('a', class_='api_txt_lines total_tit')
    report = INTEGRATED_KEYWORD_PRINT_FORMAT.format(query)
    hits = 0
    for index, result in enumerate(results):
        link = result['href']
        if site_url in link:
            report += SITE_RESULT_PRINT_FORMAT.format(index + 1, result.text)
            hits += 1
        if blog_id in link:
            report += BLOG_RESULT_PRINT_FORMAT.format(index + 1, result.text)
            hits += 1
    if hits == 0:
        report += THERE_IS_NO_RESULT
    return report
# Keywords to check (Korean water-leak service queries), and where to write
# the combined report.
query_list = [
    '양산누수',
    '김해누수',
    '부산누수',
    '기장누수'
]
dirname = os.path.dirname(__file__)
filename = os.path.join(dirname, 'result.txt')
# Explicit UTF-8: the report contains Korean text, and the platform default
# encoding (e.g. cp949/cp1252 on Windows) may be unable to encode it.
with open(filename, 'w', encoding='utf-8') as f:
    for query in query_list:
        f.write(integrated_search(SITE_URL, BLOG_ID, query))
        f.write(blog_search(BLOG_ID, query))
        f.write('\n\n')
        time.sleep(DELAY)
#encoding: utf-8
from django.http import JsonResponse, HttpResponse
class HttpCode(object):
    """Symbolic names for the HTTP status codes used in the JSON responses."""
    ok = 200            # request succeeded
    paramserror = 400   # bad request / invalid parameters
    unauth = 401        # authentication required
    methoderror = 405   # HTTP method not allowed
    servererror = 500   # internal server error
# {"code":400,"message":"","data":{}}
def result(code=HttpCode.ok, message="", data=None, kwargs=None):
    """
    Build the standard JSON envelope ``{"code": ..., "message": ..., "data": ...}``.

    :param int code: HTTP-style status code (defaults to ``HttpCode.ok``)
    :param str message: human readable message
    :param data: response payload
    :param dict kwargs: optional extra top-level keys merged into the envelope
    :return: JsonResponse wrapping the envelope
    """
    json_dict = {"code": code, "message": message, "data": data}
    # A non-empty dict is the only input we merge; the original's extra
    # `kwargs.keys()` check was redundant with dict truthiness.
    if isinstance(kwargs, dict) and kwargs:
        json_dict.update(kwargs)
    return JsonResponse(json_dict)
def ok():
    """Return the standard success response (code 200, empty message and data)."""
    return result()
def params_error(message="",data=None):
    """Return a 400 (parameter error) JSON response."""
    return result(code=HttpCode.paramserror,message=message,data=data)
def unauth(message="",data=None):
    """Return a 401 (unauthenticated) JSON response."""
    return result(code=HttpCode.unauth,message=message,data=data)
def method_error(message='',data=None):
    """Return a 405 (method not allowed) JSON response."""
    return result(code=HttpCode.methoderror,message=message,data=data)
def server_error(message='',data=None):
    """Return a 500 (internal server error) JSON response."""
    return result(code=HttpCode.servererror,message=message,data=data)
|
from easydict import EasyDict as edict
import os
import shutil
import yaml
def mkdir_if_not_exists(path):
    """Make a directory (including parents) if it does not exist.

    Uses ``exist_ok=True`` instead of a separate existence check, so a
    concurrent creator cannot race between the check and ``makedirs``.

    Args:
        path: directory to create
    """
    os.makedirs(path, exist_ok=True)
def read_yaml(filename):
    """Load a yaml file as a dictionary.

    Args:
        filename (str): .yaml file path, or None
    Returns:
        cfg (dict): parsed configuration; empty dict when filename is None
    """
    if filename is None:
        return {}
    with open(filename, 'r') as f:
        return yaml.load(f, Loader=yaml.FullLoader)
def copy_file(src_file, tgt_file):
    """Copy a file's contents (``copyfile`` does not preserve metadata/permissions).

    Args:
        src_file (str): source file
        tgt_file (str): target file
    """
    shutil.copyfile(src_file, tgt_file)
def update_dict(dict1, dict2, intersection=False):
    """Recursively overwrite entries of *dict1* with those of *dict2*.

    Args:
        dict1 (dict): reference dictionary, updated in place
        dict2 (dict): new dictionary whose values take precedence
        intersection (bool): when True, a key present in dict2 but missing
            from dict1 raises ValueError instead of being added
    Returns:
        dict1 (dict): the updated reference dictionary
    Prints a warning for every dict1 key that dict2 does not override.
    """
    for key in dict2:
        if key in dict1:
            if isinstance(dict1[key], dict):
                # both sides are (expected to be) dicts: merge recursively
                dict1[key] = update_dict(dict1[key], dict2[key], intersection)
            else:
                dict1[key] = dict2[key]
        elif intersection:
            raise ValueError(f"Key '{key}' is in the second dict but not in the first dict!")
        else:
            dict1[key] = dict2[key]
    # inverse check: report defaults that the override did not touch
    for key in dict1:
        if key not in dict2:
            print(f"Warning: key {key} is not given and will use the default values")
    return dict1
def merge_cfg(cfg_files, intersection=False):
    """Merge a default configuration with custom configuration overrides.

    Args:
        cfg_files (list[str]): configuration file paths [default, custom, ...]
        intersection (bool): forwarded to update_dict; when True, keys that
            exist only in a custom file raise ValueError
    Returns:
        cfg (edict): merged EasyDict
    """
    merged = read_yaml(cfg_files[0])
    for path in cfg_files[1:]:
        if not os.path.exists(path):
            raise ValueError(f"File {path} does not exist." )
        merged = update_dict(merged, read_yaml(path), intersection)
    return edict(merged)
def write_cfg(default, custom, f, level_cnt=0):
    """Write a merged configuration to an open file, annotating overrides.

    Each entry is written as "key: value" padded to a fixed column followed
    by a "# |" marker; when the custom value differs from the default, the
    default value is appended after the marker for reference.

    Args:
        default (dict): default configuration dictionary
        custom (dict): custom configuration dictionary (missing nested
            sections are filled in with {} as a side effect)
        f (TextIOWrapper): open, writable file-like object
        level_cnt (int): current nesting depth (controls indentation)
    """
    offset_len = 100
    for item in default:
        if isinstance(default[item], dict):
            # Nested section: ensure the custom side has a matching
            # (possibly empty) section, then recurse one level deeper.
            if item not in custom:
                custom[item] = {}
            line = " "*level_cnt + item + ": "
            offset = offset_len - len(line)
            line += " "*offset + " # |"
            f.writelines(line + "\n")
            write_cfg(default[item], custom[item], f, level_cnt+1)
        else:
            line = " " * level_cnt + item + ": "
            # Membership test instead of get(item, -1) == -1 so a legitimate
            # configured value of -1 is not mistaken for "missing".
            if item not in custom:
                if default[item] is not None:
                    line += str(default[item])
                offset = offset_len - len(line)
                line += " "*offset + " # | "
            else:
                if custom[item] is not None:
                    line += str(custom[item])
                offset = offset_len - len(line)
                line += " "*offset + " # | "
                if custom[item] != default[item]:
                    line += str(default[item])
            f.writelines(line)
            f.writelines("\n")
def save_cfg(cfg_files, file_path):
    """Save a merged configuration file with default-value annotations.

    Args:
        cfg_files (list[str]): configuration file paths [default, custom]
        file_path (str): path of the annotated file to create
    """
    # read configurations
    default = read_yaml(cfg_files[0])
    custom = read_yaml(cfg_files[1])
    # "with" guarantees the file is closed even if write_cfg raises,
    # unlike the previous bare open()/close() pair.
    with open(file_path, 'w') as f:
        # header line: setup column | default column
        line = "# " + "-"*20 + " Setup " + "-"*74
        line += "|" + "-"*10 + " Default " + "-"*20 + "\n"
        f.writelines(line)
        # write configurations
        write_cfg(default, custom, f)
|
#!/usr/bin/env python3
from __future__ import print_function
from __future__ import absolute_import
import sys
import re, botocore
from taw.util import *
import taw.sshlike
import taw.instance
import taw.zone
import taw.bucket
import taw.vpc
import taw.subnet
import taw.list
import taw.sg
import taw.keypair
import taw.ip
import taw.image
import taw.completion
import taw.shell
from taw.taw import * # This must be the end of imports
# commands/subcommands
# Main runner
def main():
    """Run the taw CLI, translating AWS client errors into exit messages."""
    try:
        taw()
    except botocore.exceptions.EndpointConnectionError as exc:
        error_exit("Cannot connect to AWS. Check the network connection.\n" + str(exc))
    except botocore.exceptions.ClientError as exc:
        message = str(exc)
        if re.search('but DryRun flag is set.', message):
            # A dry-run "failure" means the request was authorized; exit cleanly.
            print("Request would have succeeded, but DryRun flag is set.")
            sys.exit(0)
        error_exit(message)


if __name__ == '__main__':
    main()
|
"""Define tests for the AirVisual config flow."""
from asynctest import patch
from pyairvisual.errors import InvalidKeyError
from homeassistant import data_entry_flow
from homeassistant.components.airvisual import CONF_GEOGRAPHIES, DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import (
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_SHOW_ON_MAP,
)
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
async def test_duplicate_error(hass):
    """Test that errors are shown when duplicates are added."""
    conf = {
        CONF_API_KEY: "abcde12345",
        CONF_LATITUDE: 51.528308,
        CONF_LONGITUDE: -0.3817765,
    }

    # Pre-register an entry with the same lat/long unique ID so the user
    # flow below collides with it.
    MockConfigEntry(
        domain=DOMAIN, unique_id="51.528308, -0.3817765", data=conf
    ).add_to_hass(hass)

    result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}, data=conf
    )

    # The flow must abort instead of creating a second entry.
    assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_invalid_api_key(hass):
    """Test that invalid credentials throws an error."""
    conf = {
        CONF_API_KEY: "abcde12345",
        CONF_LATITUDE: 51.528308,
        CONF_LONGITUDE: -0.3817765,
    }

    # Force the key-validation call to fail so the form shows the error.
    with patch(
        "pyairvisual.api.API.nearest_city", side_effect=InvalidKeyError,
    ):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=conf
        )

    assert result["errors"] == {CONF_API_KEY: "invalid_api_key"}
async def test_migration_1_2(hass):
    """Test migrating from version 1 to version 2."""
    conf = {
        CONF_API_KEY: "abcde12345",
        CONF_GEOGRAPHIES: [
            {CONF_LATITUDE: 51.528308, CONF_LONGITUDE: -0.3817765},
            {CONF_LATITUDE: 35.48847, CONF_LONGITUDE: 137.5263065},
        ],
    }

    # A single v1 entry keyed by API key holds multiple geographies.
    config_entry = MockConfigEntry(
        domain=DOMAIN, version=1, unique_id="abcde12345", data=conf
    )
    config_entry.add_to_hass(hass)

    assert len(hass.config_entries.async_entries(DOMAIN)) == 1

    with patch("pyairvisual.api.API.nearest_city"):
        assert await async_setup_component(hass, DOMAIN, {DOMAIN: conf})

    # Setup must have split the v1 entry into one entry per geography.
    config_entries = hass.config_entries.async_entries(DOMAIN)
    assert len(config_entries) == 2

    assert config_entries[0].unique_id == "51.528308, -0.3817765"
    assert config_entries[0].title == "Cloud API (51.528308, -0.3817765)"
    assert config_entries[0].data == {
        CONF_API_KEY: "abcde12345",
        CONF_LATITUDE: 51.528308,
        CONF_LONGITUDE: -0.3817765,
    }

    assert config_entries[1].unique_id == "35.48847, 137.5263065"
    assert config_entries[1].title == "Cloud API (35.48847, 137.5263065)"
    assert config_entries[1].data == {
        CONF_API_KEY: "abcde12345",
        CONF_LATITUDE: 35.48847,
        CONF_LONGITUDE: 137.5263065,
    }
async def test_options_flow(hass):
    """Test config flow options."""
    conf = {CONF_API_KEY: "abcde12345"}

    config_entry = MockConfigEntry(
        domain=DOMAIN,
        unique_id="abcde12345",
        data=conf,
        options={CONF_SHOW_ON_MAP: True},
    )
    config_entry.add_to_hass(hass)

    # Patch setup so the entry does not actually initialize the integration.
    with patch(
        "homeassistant.components.airvisual.async_setup_entry", return_value=True
    ):
        result = await hass.config_entries.options.async_init(config_entry.entry_id)

        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "init"

        # Flipping the option must be persisted onto the entry.
        result = await hass.config_entries.options.async_configure(
            result["flow_id"], user_input={CONF_SHOW_ON_MAP: False}
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert config_entry.options == {CONF_SHOW_ON_MAP: False}
async def test_show_form(hass):
    """Test that the form is served with no input."""
    flow_result = await hass.config_entries.flow.async_init(
        DOMAIN, context={"source": SOURCE_USER}
    )

    # Without user data the flow should present its initial form.
    assert flow_result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert flow_result["step_id"] == "user"
async def test_step_import(hass):
    """Test that the import step works."""
    conf = {
        CONF_API_KEY: "abcde12345",
        CONF_LATITUDE: 51.528308,
        CONF_LONGITUDE: -0.3817765,
    }

    # Patch entry setup and key validation so the import flow can complete
    # without network access.
    with patch(
        "homeassistant.components.airvisual.async_setup_entry", return_value=True
    ), patch("pyairvisual.api.API.nearest_city"):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "Cloud API (51.528308, -0.3817765)"
        assert result["data"] == {
            CONF_API_KEY: "abcde12345",
            CONF_LATITUDE: 51.528308,
            CONF_LONGITUDE: -0.3817765,
        }
async def test_step_user(hass):
    """Test that the user step works."""
    conf = {
        CONF_API_KEY: "abcde12345",
        CONF_LATITUDE: 32.87336,
        CONF_LONGITUDE: -117.22743,
    }

    # Patch entry setup and key validation so the user flow can complete
    # without network access.
    with patch(
        "homeassistant.components.airvisual.async_setup_entry", return_value=True
    ), patch("pyairvisual.api.API.nearest_city"):
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": SOURCE_USER}, data=conf
        )

        assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert result["title"] == "Cloud API (32.87336, -117.22743)"
        assert result["data"] == {
            CONF_API_KEY: "abcde12345",
            CONF_LATITUDE: 32.87336,
            CONF_LONGITUDE: -117.22743,
        }
|
from dy01.dy01 import DY01
__version__ = "1.0.0"
|
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, PermissionsMixin, UserManager
from django.conf import settings
from django.db.models.base import Model
import uuid
import os
def recipe_image_file_path(instance, file_name):
    """Build a collision-free upload path for a new recipe image.

    The original extension is kept, but the basename is replaced with a
    random UUID so concurrent uploads can never overwrite each other.
    """
    extension = file_name.rsplit('.', 1)[-1]
    unique_name = f'{uuid.uuid4()}.{extension}'
    return os.path.join('uploads/recipe/', unique_name)
class UserManager(BaseUserManager):
    """Manager for the custom email-based User model."""

    def create_user(self, email, password=None, **extra_fields):
        """Create, save and return a regular user.

        Raises:
            ValueError: if no email address is supplied.
        """
        if not email:
            raise ValueError('Users must have an email address')
        user = self.model(email=self.normalize_email(email), **extra_fields)
        # set_password hashes the raw password (None marks it unusable).
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, password):
        """Create and return a superuser with staff/superuser flags set."""
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        # Save on the manager's database alias, consistent with create_user
        # (the original plain save() ignored the alias).
        user.save(using=self._db)
        return user
class User(AbstractBaseUser,PermissionsMixin):
    """Custom user model that supports using email instead of a username."""
    email = models.EmailField(max_length=255,unique=True)
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    # All object access goes through the custom email-based manager above.
    objects = UserManager()

    # Authenticate with the email field rather than a username.
    USERNAME_FIELD = 'email'
class Tag(models.Model):
    """Tag to be used for a recipe."""
    name = models.CharField(max_length=255)
    # Deleting the owning user removes their tags as well.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )

    def __str__(self):
        """Readable representation used by the admin and shell."""
        return self.name
class Ingredient(models.Model):
    """
    Ingredient object to be used in a recipe
    """
    name = models.CharField(max_length=255)
    # Deleting the owning user removes their ingredients as well.
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )

    def __str__(self):
        """Readable representation used by the admin and shell."""
        return self.name
class Recipe(models.Model):
    """
    Recipe object: a titled dish with timing, price and related
    tags/ingredients, owned by a user.
    """
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete = models.CASCADE
    )
    title = models.CharField(max_length=255)
    time_minutes = models.IntegerField()
    price = models.DecimalField(max_digits=5, decimal_places=2)
    link = models.CharField(max_length=255, blank=True)
    # You can pass the class reference, but then the model must be defined
    # above this one; passing the model name as a string avoids ordering
    # issues for many-to-many relations.
    ingredients = models.ManyToManyField('Ingredient')
    tags = models.ManyToManyField('Tag')
    # Image is optional; uploads are routed through recipe_image_file_path.
    image = models.ImageField(null=True, upload_to = recipe_image_file_path)

    def __str__(self):
        """Readable representation used by the admin and shell."""
        return self.title
|
# Copyright (c) 2018, 2020, Oracle Corporation and/or its affiliates.
# Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
#
# ------------
# Description:
# ------------
# This is a WDT filter for primordial domain creation. It filters out all resources and
# apps deployments, leaving only the domainInfo and admin server in topology.
#
def filter_model(model):
    """WDT filter for primordial domain creation (mutates model in place).

    Keeps only the admin server entry in topology (or the Name/Security
    sections when no admin server is named) and empties out all resources
    and app deployments, leaving the domainInfo untouched.
    """
    if model and 'topology' in model:
        topology = model['topology']
        # Use .get(): a model whose topology has no AdminServerName entry
        # must fall through to the else branch, not raise KeyError.
        if topology.get('AdminServerName') is not None:
            admin_server = topology['AdminServerName']
            model['topology'] = {}
            model['topology']['AdminServerName'] = admin_server
            model['topology']['Server'] = {}
            model['topology']['Server'][admin_server] = topology['Server'][admin_server]
        else:
            model['topology'] = {}
            if 'Name' in topology:
                model['topology']['Name'] = topology['Name']
            if 'Security' in topology:
                model['topology']['Security'] = topology['Security']

    if model and 'appDeployments' in model:
        model['appDeployments'] = {}

    if model and 'resources' in model:
        model['resources'] = {}
|
"""Tests for SunSpec api."""
import pytest
from custom_components.sunspec.api import ConnectionError
from custom_components.sunspec.api import ConnectionTimeoutError
from custom_components.sunspec.api import SunSpecApiClient
from sunspec2.modbus.client import SunSpecModbusClientException
from sunspec2.modbus.client import SunSpecModbusClientTimeout
async def test_api(hass, sunspec_client_mock):
    """Test API calls."""
    # To test the api submodule, we first create an instance of our API client
    api = SunSpecApiClient(host="test", port=123, slave_id=1, hass=hass)

    # The mocked client reports this fixed set of SunSpec model IDs.
    models = await api.async_get_models()
    assert models == [
        1,
        103,
        160,
        304,
        701,
        702,
        703,
        704,
        705,
        706,
        707,
        708,
        709,
        710,
        711,
        712,
    ]

    # Device identification data exposed by the mock.
    device_info = await api.async_get_device_info()
    assert device_info.getValue("Mn") == "SunSpecTest"
    assert device_info.getValue("SN") == "sn-123456789"

    # Model data exposes both values and per-point metadata.
    model = await api.async_get_data(701)
    assert model.getValue("W") == 9800
    assert model.getMeta("W")["label"] == "Active Power"

    model = await api.async_get_data(705)
    keys = model.getKeys()
    assert len(keys) == 22
async def test_modbus_connect(hass, sunspec_modbus_client_mock):
    # Reset the class-level client cache so this test builds a fresh client.
    SunSpecApiClient.CLIENT_CACHE = {}
    """Test API calls."""
    # NOTE(review): the string above is a no-op expression, not a real
    # docstring, because it follows a statement; consider moving it first.
    # To test the api submodule, we first create an instance of our API client
    api = SunSpecApiClient(host="test", port=123, slave_id=1, hass=hass)
    client = api.get_client()
    # Creating the client must trigger exactly one device scan.
    client.scan.assert_called_once()
    SunSpecApiClient.CLIENT_CACHE = {}
async def test_modbus_connect_fail(hass, mocker):
    # Simulate a device that accepts the TCP connect call but never reports
    # itself as connected.
    mocker.patch(
        "sunspec2.modbus.client.SunSpecModbusClientDeviceTCP.connect",
        return_value={},
    )
    mocker.patch(
        "sunspec2.modbus.client.SunSpecModbusClientDeviceTCP.is_connected",
        return_value=False,
    )
    """Test API calls."""
    # NOTE(review): the string above is a no-op expression (it follows
    # statements), not this test's docstring.
    # To test the api submodule, we first create an instance of our API client
    api = SunSpecApiClient(host="test", port=123, slave_id=1, hass=hass)
    with pytest.raises(Exception):
        api.modbus_connect()
async def test_read_model_timeout(hass, mocker):
    """Test that a modbus timeout surfaces as ConnectionTimeoutError."""
    mocker.patch(
        "custom_components.sunspec.api.SunSpecApiClient.read_model",
        side_effect=SunSpecModbusClientTimeout,
    )
    api = SunSpecApiClient(host="test", port=123, slave_id=1, hass=hass)
    with pytest.raises(ConnectionTimeoutError):
        await api.async_get_data(1)
async def test_read_model_error(hass, mocker):
    """Test that a generic modbus client error surfaces as ConnectionError."""
    mocker.patch(
        "custom_components.sunspec.api.SunSpecApiClient.read_model",
        side_effect=SunSpecModbusClientException,
    )
    api = SunSpecApiClient(host="test", port=123, slave_id=1, hass=hass)
    with pytest.raises(ConnectionError):
        await api.async_get_data(1)
|
"""Forms for Stories and related entities."""
from bootstrap3_datetime.widgets import DateTimePicker
from django import forms
from django.db.models import Q
from django.forms import Textarea, TextInput, Select
from editorial.models import (
Project,
Series,
Story,
)
from editorial.widgets import ArrayFieldSelectMultiple
class StoryForm(forms.ModelForm):
    """Form to create/edit a new story."""

    def __init__(self, *args, **kwargs):
        # "organization" is required; "story" is popped so ModelForm does not
        # see it (currently unused in this form).
        org = kwargs.pop("organization")
        story = kwargs.pop("story", None)
        super(StoryForm, self).__init__(*args, **kwargs)
        self.fields['share_with'].queryset = org.get_org_networks()
        self.fields['collaborate_with'].queryset = org.get_org_collaborators_vocab()

        # limit project and series to those owned by org or part of content and org is collaborator for
        self.fields['project'].queryset = Project.objects.filter(
            Q(organization=org) | Q(collaborate_with=org))
        self.fields['series'].queryset = Series.objects.filter(
            Q(organization=org) | Q(collaborate_with=org))

        # set empty labels
        self.fields['series'].empty_label = 'Select a series'
        self.fields['project'].empty_label = 'Select a project'

    # Extra (non-model-backed defaults) fields rendered with datetime pickers.
    embargo_datetime = forms.DateTimeField(
        required=False,
        widget=DateTimePicker(
            options={'format': 'YYYY-MM-DD HH:mm'},
            attrs={'id': 'story-embargo-picker'})
    )

    share_with_date = forms.DateTimeField(
        required=False,
        widget=DateTimePicker(
            options={'format': 'YYYY-MM-DD HH:mm'},
            attrs={'id': 'story-share-picker'})
    )

    class Meta:
        model = Story
        fields = ['name',
                  'story_description',
                  'project',
                  'series',
                  'collaborate',
                  'collaborate_with',
                  'embargo',
                  'embargo_datetime',
                  'sensitive',
                  'share',
                  'ready_to_share',
                  'share_with',
                  'share_with_date',
                  'archived',
                  ]
        widgets = {
            'name': TextInput(attrs={'class': 'form-control', 'placeholder': 'Story Name'}),
            'story_description': Textarea(
                attrs={'class': 'form-control', 'placeholder': 'Description'}),
            'collaborate_with': ArrayFieldSelectMultiple(
                attrs={'class': 'chosen-select', 'id': 'collaborate-with',
                       'data-placeholder': 'Select Partners'}),
            'share_with': ArrayFieldSelectMultiple(
                attrs={'class': 'chosen-select', 'id': 'share-with',
                       'data-placeholder': 'Select Networks'}),
            'series': Select(attrs={'class': 'c-select', 'id': 'story-series'}),
            'project': Select(attrs={'class': 'c-select', 'id': 'story-project'}),
        }

    # class Media:
    #     css = {
    #         'all': ('css/bootstrap-datetimepicker.css', 'css/chosen.min.css')
    #     }
    #     js = ('scripts/chosen.jquery.min.js',)
class StoryTeamForm(forms.ModelForm):
    """Form to edit the team assigned to a story."""

    def __init__(self, *args, **kwargs):
        org = kwargs.pop("organization")
        story = kwargs.pop("story", None)
        super(StoryTeamForm, self).__init__(*args, **kwargs)

        # TODO future should include eligible contractors
        # With an existing story, offer its own team vocabulary; otherwise
        # fall back to all users in the organization.
        if story:
            self.fields['team'].queryset = story.get_story_team_vocab()
        else:
            self.fields['team'].queryset = org.get_org_users()

    class Meta:
        model = Story
        fields = ['team']
        widgets = {
            'team': ArrayFieldSelectMultiple(
                attrs={'class': 'chosen-select', 'id': 'story-team',
                       'data-placeholder': 'Select Team'}),
        }
|
from typing import List
from src.pc_methods.pc_base import Base
class GeoCNNv2(Base):
    """Command-line builder for the GeoCNN v2 point-cloud codec scripts."""

    def __init__(self):
        super().__init__()

    def encode(self, orig_pc, enc_pc) -> List[str]:
        """Return the argv list that encodes orig_pc into enc_pc."""
        params = self.cfg['params'][self.id]
        cmd = ['python3',
               self.cfg['encoder'],
               '--input_files=' + str(orig_pc),
               '--output_files=' + str(enc_pc),
               '--opt_metrics=' + str(self.cfg['opt_metrics']),
               '--checkpoint_dir=' + str(params['checkpoint_dir']),
               '--model_config=' + str(params['model_config']),
               '--resolution=' + str(params['resolution']),
               '--octree_level=' + str(params['octree_level']),
               ]
        return cmd

    def decode(self, enc_file, dec_file) -> List[str]:
        """Return the argv list that decodes enc_file into dec_file."""
        params = self.cfg['params'][self.id]
        cmd = ['python3',
               self.cfg['decoder'],
               # str() applied for consistency with encode(), so Path-like
               # inputs work here as well.
               '--input_files=' + str(enc_file),
               '--output_files=' + str(dec_file),
               '--checkpoint_dir=' + str(params['checkpoint_dir']),
               '--model_config=' + str(params['model_config']),
               ]
        return cmd
import numpy as np
from MagniPy.LensBuild.Cosmology.cosmology import Cosmo
class TNFW:
    """Truncated NFW ("tNFW") lens profile.

    Provides deflection angle, convergence and mass quantities for an NFW
    halo truncated at r_trunc = tau * Rs, plus helpers converting physical
    NFW parameters (M200, concentration) into lensing quantities.
    """

    def __init__(self,z=None,zsrc=None,c_turnover=True,cosmology=None):
        """
        Adopting a standard cosmology; other cosmologies not yet implemented.

        :param z: lens redshift
        :param zsrc: source redshift
        :param c_turnover: flag stored on the instance (not used within this class body)
        :param cosmology: optional pre-built Cosmo instance; when given, its
            zd/zsrc attributes override the z/zsrc arguments
        """
        if cosmology is None:
            if z is None or zsrc is None:
                # Without redshifts no Cosmo can be built; distance-dependent
                # methods will fail until a cosmology is available.
                print('Warning; no cosmology specified.')
            else:
                self.cosmology = Cosmo(zd=z, zsrc=zsrc, compute=False)
                self.z, self.zsrc = z, zsrc
        else:
            self.cosmology = cosmology
            self.z, self.zsrc = cosmology.zd, cosmology.zsrc

        self.c_turnover=c_turnover

    def M_finite(self, rho, Rs, tau):
        """Total (finite) mass of the truncated profile integrated to infinity.

        :param rho: density normalization
        :param Rs: scale radius
        :param tau: truncation radius in units of Rs (r_trunc / Rs)
        """
        t2 = tau**2
        return 4*np.pi*Rs**3*rho*t2*(t2+1)**-2*(
            (t2-1)*np.log(tau) + np.pi*tau - (t2+1))

    def def_angle(self, x, y, Rs=None, theta_Rs=None, r_trunc=None, center_x=0, center_y=0):
        """Deflection angle components (alpha_x, alpha_y) at position (x, y).

        :param theta_Rs: deflection at the scale radius (sets the amplitude)
        :param r_trunc: truncation radius, same units as Rs
        """
        x_loc = x - center_x
        y_loc = y - center_y
        r = np.sqrt(x_loc ** 2 + y_loc ** 2)
        xnfw = r * Rs ** -1
        tau = r_trunc * Rs ** -1

        # Clamp the radius away from zero to avoid division by zero on axis.
        xmin = 0.00000001
        if isinstance(xnfw,float) or isinstance(xnfw,int):
            xnfw = max(xmin,xnfw)
        else:
            xnfw[np.where(xnfw<xmin)] = xmin

        magdef = theta_Rs * (1 + np.log(0.5)) ** -1 * self.t_fac(xnfw, tau) * xnfw ** -1

        # Project the magnitude onto the x/y directions (xnfw*Rs == r).
        return magdef * x_loc * (xnfw*Rs) ** -1, magdef * y_loc * (xnfw*Rs) ** -1

    def F(self,x):
        """Piecewise NFW auxiliary function F(x): arctanh form for x<1,
        arctan form for x>1, and F(1)=1 by continuity.

        Accepts a float/int or a numpy array; for array input, entries
        exactly equal to 1 keep the initialized value 1 from np.ones_like.
        """
        if isinstance(x, np.ndarray):
            nfwvals = np.ones_like(x)
            inds1 = np.where(x < 1)
            inds2 = np.where(x > 1)
            nfwvals[inds1] = (1 - x[inds1] ** 2) ** -.5 * np.arctanh((1 - x[inds1] ** 2) ** .5)
            nfwvals[inds2] = (x[inds2] ** 2 - 1) ** -.5 * np.arctan((x[inds2] ** 2 - 1) ** .5)
            return nfwvals

        elif isinstance(x, float) or isinstance(x, int):
            if x == 1:
                return 1
            if x < 1:
                return (1 - x ** 2) ** -.5 * np.arctanh((1 - x ** 2) ** .5)
            else:
                return (x ** 2 - 1) ** -.5 * np.arctan((x ** 2 - 1) ** .5)

    def L(self,x,tau):
        """Auxiliary log term: L(x, tau) = ln(x / (tau + sqrt(tau^2 + x^2)))."""
        return np.log(x*(tau+np.sqrt(tau**2+x**2))**-1)

    def t_fac(self, x, tau):
        """Radial factor of the truncated-NFW deflection angle."""
        return tau ** 2 * (tau ** 2 + 1) ** -2 * (
                (tau ** 2 + 1 + 2 * (x ** 2 - 1)) * self.F(x) + tau * np.pi + (tau ** 2 - 1) * np.log(tau) +
                np.sqrt(tau ** 2 + x ** 2) * (-np.pi + self.L(x, tau) * (tau ** 2 - 1) * tau ** -1))

    def _F(self, X, tau):
        """
        Analytic solution of the projection integral (convergence).

        :param X: R/Rs
        :type X: float > 0
        :param tau: truncation radius in units of Rs
        """
        t2 = tau ** 2
        Fx = self.F(X)
        return t2 * (2 * np.pi * (t2 + 1) ** 2) ** -1 * (
                ((t2 + 1) * (X ** 2 - 1) ** -1) * (1 - Fx)
                +
                2 * Fx
                -
                np.pi * (t2 + X ** 2) ** -.5
                +
                (t2 - 1) * (tau * (t2 + X ** 2) ** .5) ** -1 * self.L(X, tau)
        )

    def kappa(self,x, y, Rs=None, theta_Rs=None, r_trunc=None, center_x=0, center_y=0):
        """Convergence at position (x, y); mirrors def_angle's radius clamping."""
        x_loc = x - center_x
        y_loc = y - center_y
        r = np.sqrt(x_loc ** 2 + y_loc ** 2)
        xnfw = r * Rs ** -1
        tau = r_trunc * Rs ** -1

        xmin = 0.00000001
        if isinstance(xnfw, float) or isinstance(xnfw, int):
            xnfw = max(xmin, xnfw)
        else:
            xnfw[np.where(xnfw < xmin)] = xmin

        # Convergence normalization derived from theta_Rs.
        ks = theta_Rs*(4*Rs*(np.log(0.5)+1))**-1
        return 2*ks*self._F(xnfw,tau)

    def params(self, x=None,y=None,mass=float, mhm=None,truncation=None,c=None,**kwargs):
        """Assemble the lensing keyword arguments for this halo.

        Returns (subkwargs, otherkwargs): subkwargs feed the lensing methods
        above; otherkwargs carries bookkeeping (name, mass, concentration,
        finite mass).

        NOTE(review): the default `mass=float` is the *type* float, not a
        number; the asserts below force callers to always pass mass/mhm.
        """
        assert mhm is not None
        assert mass is not None

        # nfw_physical2angle returns (theta_Rs, Rs_angle, rho0, Rs, r200);
        # the last three are in physical units.
        rsdef,Rs,rho_mpc,Rs_mpc,r200_mpc = self.nfw_physical2angle(mass, c)
        #ks = rsdef*(4*rs*(np.log(0.5)+1))**-1

        subkwargs = {}
        otherkwargs = {}

        otherkwargs['name'] = 'TNFW'
        subkwargs['theta_Rs'] = rsdef
        subkwargs['Rs'] = Rs
        subkwargs['center_x'] = x
        subkwargs['center_y'] = y

        if 'r_trunc' in kwargs:
            # Caller supplied an explicit truncation radius.
            subkwargs['r_trunc'] = kwargs['r_trunc']
        else:
            # Otherwise the truncation object provides the recipe.
            if truncation.routine == 'fixed_radius':
                subkwargs['r_trunc'] = truncation.fixed_radius(Rs*c)
            elif truncation.routine == 'virial3d':
                subkwargs['r_trunc'] = truncation.virial3d(mass)
            else:
                raise Exception('specify truncation.')

        otherkwargs['mass'] = mass
        otherkwargs['c'] = c
        otherkwargs['name'] = 'TNFW'
        otherkwargs['mass_finite'] = self.M_finite(rho_mpc,Rs_mpc,subkwargs['r_trunc']*Rs**-1)

        return subkwargs,otherkwargs

    def M200(self, Rs, rho0, c):
        """
        M(R_200) calculation for NFW profile

        :param Rs: scale radius
        :type Rs: float
        :param rho0: density normalization (characteristic density)
        :type rho0: float
        :param c: concentration
        :type c: float [4,40]
        :return: mass enclosed within R_200
        """
        return 4*np.pi*rho0*Rs**3*(np.log(1+c)-c/(1+c))

    def r200_M(self, M):
        """
        computes the radius R_200 of a halo of mass M in comoving distances M/h

        :param M: halo mass in M_sun/h
        :type M: float or numpy array
        :return: radius R_200 in comoving Mpc/h
        """
        return (3*M/(4*np.pi*self.cosmology.get_rhoc()*200))**(1./3.)

    def M_r200(self, r200):
        """
        Inverse of r200_M.

        :param r200: r200 in comoving Mpc/h
        :return: M200
        """
        return self.cosmology.get_rhoc()*200 * r200**3 * 4*np.pi/3.

    def rho0_c(self, c):
        """
        computes density normalization as a function of concentration parameter

        :return: density normalization in h^2/Mpc^3 (comoving)
        """
        return 200./3*self.cosmology.get_rhoc()*c**3/(np.log(1+c)-c/(1+c))

    def tau(self,m,rt,mhm=False):
        """Truncation radius rt expressed in units of the scale radius.

        NOTE(review): relies on self.nfw_params, which is not defined in this
        class — presumably provided elsewhere; confirm before use.
        """
        ks,rs = self.nfw_params(m,mhm=mhm)
        return rt*rs**-1

    def nfwParam_physical(self, M, c):
        """
        returns the NFW parameters in physical units

        :param M: physical mass in M_sun
        :param c: concentration
        :return: (rho0, Rs, r200) in physical units
        """
        h = self.cosmology.cosmo.h
        r200 = self.r200_M(M * h) * h * self.cosmology.a_z(self.z)  # physical radius r200
        rho0 = self.rho0_c(c) / h**2 / self.cosmology.a_z(self.z)**3  # physical density in M_sun/Mpc**3
        Rs = r200/c
        return rho0, Rs, r200

    def nfw_physical2angle(self, M, c):
        """
        converts the physical mass and concentration parameter of an NFW
        profile into the lensing quantities

        :param M: mass enclosed within 200*rho_crit
        :param c: NFW concentration parameter (r200/r_s)
        :return: (theta_Rs, Rs_angle, rho0, Rs, r200) — theta_Rs is the
            observed bending angle at the scale radius and Rs_angle the scale
            radius, both in arcsec; the last three are physical quantities
        """
        rho0, Rs, r200 = self.nfwParam_physical(M, c)
        Rs_angle = Rs / self.cosmology.D_A(0,self.z) / self.cosmology.arcsec  # Rs in arcsec
        theta_Rs = rho0 * (4 * Rs ** 2 * (1 + np.log(1. / 2.)))
        # Scale by the critical surface density and convert to arcsec.
        return theta_Rs / self.cosmology.get_epsiloncrit(self.z,self.cosmology.zsrc) / self.cosmology.D_A(0,self.z) / self.cosmology.arcsec, \
               Rs_angle, rho0, Rs, r200

    def M_physical(self,m200,mhm=0):
        """
        :param m200: m200
        :return: physical mass corresponding to m200

        NOTE(review): relies on self.nfw_concentration, which is not defined
        in this class — confirm it is provided elsewhere.
        """
        c = self.nfw_concentration(m200,mhm=mhm)
        rho0, Rs, r200 = self.nfwParam_physical(m200,c)
        return 4*np.pi*rho0*Rs**3*(np.log(1+c)-c*(1+c)**-1)
def f(tau):
    """Dimensionless tNFW mass prefactor as a function of tau = r_trunc/Rs."""
    t2 = tau ** 2
    prefactor = t2 / (t2 + 1) ** 2
    return prefactor * ((t2 - 1) * np.log(tau) + np.pi * tau - t2 - 1)
def tau_factor(x, t):
    """Dimensionless truncation factor evaluated at radius x for tau = t."""
    t2 = t ** 2
    prefactor = t2 / ((1 + x) * (1 + t2) ** 2)
    inner = (-x * (1 + t2)
             + 2 * (1 + x) * t * np.arctan(x / t)
             + (1 + x) * (t2 - 1) * np.log(t * (1 + x))
             - 0.5 * (1 + x) * (t2 - 1) * np.log(x ** 2 + t2))
    return prefactor * inner
|
#!/usr/bin/env python
"""
Growatt MQTT Client
--------------------------------------------------------------------------
Based on the "Pymodbus Synchronous Server Example", the synchronous server
will process Growatt Modbus TCP packets using custom Request and Response
handlers.
"""
# --------------------------------------------------------------------------- #
# import the various server implementations
# --------------------------------------------------------------------------- #
from pymodbus.server.sync import StartTcpServer
from pymodbus.device import ModbusDeviceIdentification
from pymodbus.datastore import ModbusSparseDataBlock
from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext
from PyGrowatt.Growatt import *
from PyGrowatt.growatt_framer import GrowattV6Framer
import threading
import configparser as configparser
import os
import paho.mqtt.client as mqtt
# --------------------------------------------------------------------------- #
# configure the service logging
# --------------------------------------------------------------------------- #
import logging
# Timestamped log format including module/function/line for debugging.
FORMAT = ('%(asctime)-15s %(levelname)-8s'
          ' %(module)s:%(funcName)s(%(lineno)s) %(message)s')
logging.basicConfig(format=FORMAT)
log = logging.getLogger()
log.setLevel(logging.INFO)
# ----------------------------------------------------------------------- #
# load the config from file
# ----------------------------------------------------------------------- #
config = configparser.ConfigParser()
# Expects a config.ini in the working directory with [MQTT] and [Growatt]
# sections (read in the __main__ block below).
config.read("config.ini")
def publish_data(datastore, interval, client):
    """Publish the inverter's energy data to MQTT, then re-schedule itself.

    :param datastore: the ModbusDataBlock that contains the data
    :param interval: seconds between publications
    :param client: the MQTT client to publish the data with
    """
    log.info("Publishing data to MQTT")

    def register_value(name):
        # All published metrics live in the input registers (function code 4).
        return datastore.getValues(4, inputRegisters[name], 1)[0]

    # Inverter status is published as its human-readable description.
    client.publish("home/solar/inverter/status",
                   inverter_status_description[register_value("inverter_status")])

    # (topic, input register name, scale divisor). Registers that are not
    # documented (wifi/inverter serial, date fields, Vac_RS) are intentionally
    # omitted pending future expansion.
    metrics = [
        ("home/solar/PV/power", "Ppv", 10),
        ("home/solar/PV/energy/total", "Epv_total", 10),
        ("home/solar/PV1/voltage", "Vpv1", 10),
        ("home/solar/PV1/current", "Ipv1", 10),
        ("home/solar/PV1/power", "Ppv1", 10),
        ("home/solar/PV1/energy/today", "Epv1_today", 10),
        ("home/solar/PV1/energy/total", "Epv1_total", 10),
        ("home/solar/PV2/voltage", "Vpv2", 10),
        ("home/solar/PV2/current", "Ipv2", 10),
        ("home/solar/PV2/power", "Ppv2", 10),
        ("home/solar/PV2/energy/today", "Epv2_today", 10),
        ("home/solar/PV2/energy/total", "Epv2_total", 10),
        ("home/solar/AC/power", "Pac", 10),
        ("home/solar/AC/frequency", "Fac", 100),
        ("home/solar/AC1/voltage", "Vac1", 10),
        ("home/solar/AC1/current", "Iac1", 10),
        ("home/solar/AC1/power", "Pac1", 10),
        ("home/solar/AC/energy/today", "Eac_today", 10),
        ("home/solar/AC/energy/total", "Eac_total", 10),
    ]
    for topic, register, divisor in metrics:
        client.publish(topic, register_value(register) / divisor)

    # Keep repeating on a timer thread.
    timer = threading.Timer(interval, publish_data, args=(datastore, interval, client))
    timer.start()
    return
if __name__ == "__main__":
# ----------------------------------------------------------------------- #
# initialize the data store
# The Holding Register is used for config data
# The Input Register is used for 'live' energy data
# The BufferedEnergy (0x50) will be stored in a "Buffered Input Register"
# ----------------------------------------------------------------------- #
input_register = ModbusSparseDataBlock([0] * 100)
holding_register = ModbusSparseDataBlock([0] * 100)
buffered_input_register = ModbusSparseDataBlock([0] * 100)
store = ModbusSlaveContext(hr=holding_register,
ir=input_register,
zero_mode=True)
store.register(0x18, 'h', holding_register)
store.register(0x19, 'h', holding_register)
store.register(0x50, 'bi', buffered_input_register)
context = ModbusServerContext(slaves=store, single=True)
# ----------------------------------------------------------------------- #
# initialize the server information
# ----------------------------------------------------------------------- #
identity = ModbusDeviceIdentification()
identity.VendorName = 'PyGrowatt'
identity.ProductCode = 'PG'
identity.VendorUrl = 'https://github.com/aaronjbrown/PyGrowatt'
identity.ProductName = 'Python Growatt Server'
identity.ModelName = 'Growatt Pymodbus Server'
identity.MajorMinorRevision = '1.0.0'
identity.UserApplicationName = os.path.basename(__file__)
# ----------------------------------------------------------------------- #
# start the server in a separate thread so it doesn't block this thread
# from publishing data to MQTT
# ----------------------------------------------------------------------- #
server_thread = threading.Thread(target=StartTcpServer,
name="ServerThread",
kwargs={"context": context,
"identity": identity,
"address": ("", 5279),
"custom_functions": [GrowattAnnounceRequest,
GrowattEnergyRequest,
GrowattPingRequest,
GrowattConfigRequest,
GrowattQueryRequest,
GrowattBufferedEnergyRequest,
],
"framer": GrowattV6Framer,
"allow_reuse_address": True,
},
)
server_thread.setDaemon(True)
server_thread.start()
# ----------------------------------------------------------------------- #
# establish connection to MQTT broker and periodically publish the data
# ----------------------------------------------------------------------- #
mqtt_client = mqtt.Client("Growatt MQTT")
log.info(f"Connecting to {config['MQTT']['ServerIP']}:{config['MQTT']['ServerPort']}")
mqtt_client.connect(host=config['MQTT']['ServerIP'], port=int(config['MQTT']['ServerPort']))
publish_data(store, int(config['Growatt']['UpdateInterval']) * 60, mqtt_client)
|
from django.contrib import admin
from .models import Empleado
# Register your models here.
class EmpleadoAdmin(admin.ModelAdmin):
    """Admin configuration for the Empleado model."""

    # BUG FIX: the original attribute was named ``lista``, which Django's
    # ModelAdmin silently ignores; ``list_display`` is the attribute that
    # controls the columns shown on the changelist page.
    list_display = ['nombre_completo', 'email', 'contacto', 'direccion']


admin.site.register(Empleado, EmpleadoAdmin)
|
""" Module contains openapi3 client """
import json
import logging
import urllib.request
from urllib.error import HTTPError
from urllib.error import URLError
from urllib.parse import urlencode
from . import exceptions
logger = logging.getLogger()
DEFAULT_CLIENT_HEADERS = {
'User-Agent': 'Mozilla/5.0',
}
def send_request(
    operation_id,
    url,
    operation,
    query,
    media_type=None,
    body=None,
    headers=None,
):
    """Perform one HTTP request for an OpenAPI operation.

    :param operation_id: schema operation id (unused here; kept so custom
        ``send_request`` hooks share the same signature).
    :param url: base URL without the query string.
    :param operation: HTTP method name, e.g. ``"get"``.
    :param query: mapping of query parameters (URL-encoded onto ``url``).
    :param media_type: request content type, sent as ``Content-Type``.
    :param body: optional JSON-serializable request body.
    :param headers: optional extra headers merged over the defaults.
    :return: tuple of ``(status_code, content_type, raw_body_bytes)``.
    :raises exceptions.ApiRequestError: on HTTP or transport failure.
    """
    if headers is None:
        headers = {}
    request_headers = DEFAULT_CLIENT_HEADERS.copy()
    request_headers.update(**headers)
    full_url = '{url}?{query}'.format(
        url=url,
        query=urlencode(query)
    )
    if body is not None:
        body = json.dumps(body).encode('utf-8')
    request = urllib.request.Request(
        full_url,
        data=body,
        headers=request_headers,
        method=operation.upper(),
    )
    if media_type:
        # BUG FIX: the original hard-coded "application/json" here, silently
        # ignoring the caller-supplied media type.
        request.add_header("Content-Type", media_type)
    try:
        response = urllib.request.urlopen(request)
    except HTTPError as e:
        logger.error('HTTPError for url: %s', full_url)
        logger.exception(e)
        raise exceptions.ApiRequestError(
            url,
            e.reason,
            getattr(e, 'code', 422),
            e.read(),
        ) from e
    except URLError as e:
        logger.error('URLError for url: %s', full_url)
        logger.exception(e)
        http_code = getattr(e, 'code', 503)
        raise exceptions.ApiRequestError(
            url,
            e.reason,
            http_code,
        ) from e
    # Close the response once consumed instead of leaking the connection.
    with response:
        return response.code, response.headers.get_content_type(), response.read()
class Operation:
    """A callable bound to one OpenAPI operation of a :class:`Client`."""

    def __init__(self, client, pattern, operation, operation_id):
        self.client = client
        self.pattern = pattern          # path template, e.g. "/pets/{id}"
        self.operation = operation      # HTTP method name, e.g. "get"
        self.operation_id = operation_id
        schema = self.client._schema
        self.schema_path_params = schema.get_path_parameters(pattern)
        self.schema_query_params = schema.get_query_parameters(
            pattern,
            operation,
        )

    def __call__(self, params=None, media_type=None, body=None, headers=None):
        """Validate parameters, send the request, and validate the response.

        :param params: mapping holding both path and query parameters.
        :param media_type: request content type; defaults to JSON when a
            body is supplied.
        :param body: optional JSON-serializable request body.
        :param headers: optional extra request headers.
        :return: the schema-validated response body.
        """
        if not params:
            params = {}
        if headers is None:
            headers = {}
        # Split caller-supplied params into path vs query parameters.
        path_params = {
            k: v for k, v in params.items()
            if k in self.schema_path_params
        }
        query_params = {
            k: v for k, v in params.items()
            if k in self.schema_query_params
        }
        self.client._schema.validate_parameters(
            self.schema_path_params,
            path_params,
        )
        self.client._schema.validate_parameters(
            self.schema_query_params,
            query_params,
        )
        query_params = self.client._schema.serialize_parameters(
            self.schema_query_params,
            query_params)
        # Substitute path parameters into the "{name}" placeholders.
        path = str(self.pattern)
        for k, v in path_params.items():
            path = path.replace('{{{}}}'.format(k), str(v))
        url = self.client._url + path
        if media_type is None and body is not None:
            media_type = 'application/json'
        self.client._schema.validate_request(
            path,
            self.operation,
            query_params,
            media_type,
            body,
        )
        resp_code, resp_content_type, resp_content = self.client._send_request(
            self.operation_id,
            url,
            self.operation,
            query_params,
            media_type,
            body,
            headers,
        )
        if resp_content_type in (
            "application/x-www-form-urlencoded",
            "multipart/form-data",
        ):
            raise NotImplementedError
        elif resp_content_type == 'application/json':
            try:
                resp_body = json.loads(resp_content.decode())
            except (json.decoder.JSONDecodeError, ValueError) as exc:
                # BUG FIX: log the undecodable *response* through the module
                # logger (the original logged the request body via the root
                # logger), and chain the original exception for debugging.
                logger.info(resp_content)
                raise exceptions.BodyValidationError(str(exc)) from exc
        else:
            resp_body = None
        return self.client._schema.validate_response(
            path,
            self.operation,
            resp_code,
            resp_content_type,
            resp_body,
        )
class Client:
    """OpenAPI client: exposes one attribute per ``operationId``."""

    Operation = Operation

    def __init__(self, url, schema, send_request=send_request):
        self._url = url
        self._schema = schema
        self._send_request = send_request
        # Bind a callable Operation for every method of every path.
        for pattern, path_item in schema['paths'].items():
            for method, spec in path_item.items():
                if method not in schema.METHODS:
                    continue
                operation_id = spec['operationId']
                setattr(
                    self,
                    operation_id,
                    self._create_operation(pattern, method, operation_id),
                )

    def _create_operation(self, path, key, operation_id):
        """Factory hook: subclasses may override ``Operation``."""
        return self.Operation(self, path, key, operation_id)
|
import pandas as pd
import networkx as nx
def to_bed(x, out):
    """Write dataframe *x* as a headerless, tab-separated BED file at *out*.

    Columns 1 and 2 (start/end coordinates) are coerced to int in place.
    """
    for col in (1, 2):
        x[col] = x[col].astype(int)
    x.to_csv(out, sep="\t", header=False, index=False)
def read_bedpe(f):
    """Read a BEDPE interaction file and keep only significant rows.

    :param f: path to a tab-separated, headerless BEDPE file whose last
        column is a significance score (p-value / FDR).
    :return: dataframe with coordinate columns cast to int, restricted to
        rows whose last column is <= 0.05, with NaN rows dropped.
    """
    df = pd.read_csv(f, sep="\t", header=None)
    for col in (1, 2, 4, 5):
        df[col] = df[col].astype(int)
    # BUG FIX: the original did ``df[last] = df[df[last] <= 0.05]``, which
    # assigns a row-filtered DataFrame into a single column (invalid in
    # pandas); the intent is to keep only the significant rows.
    df = df[df[df.columns[-1]] <= 0.05]
    df = df.dropna()
    return df
def read_bed(f):
    """Read a headerless, tab-separated BED file; cast start/end to int."""
    bed = pd.read_csv(f, sep="\t", header=None)
    bed[1] = bed[1].astype(int)
    bed[2] = bed[2].astype(int)
    return bed.dropna()
def mango2bed(df):
    """Flatten BEDPE anchors into a deduplicated BED file ("mango.bed").

    Both anchor triplets (columns 0-2 and 3-5) are stacked, a unique
    "chrom-start-end" name column is added, and duplicates are removed.
    """
    left = df[[0, 1, 2]].copy()
    right = df[[3, 4, 5]].copy()
    right.columns = left.columns
    anchors = pd.concat([left, right], axis=0)
    anchors['name'] = (
        anchors[0] + "-" + anchors[1].astype(str) + "-" + anchors[2].astype(str)
    )
    anchors = anchors.drop_duplicates('name')
    to_bed(anchors, "mango.bed")
# --- driver script: build a contact-degree table from a mango loop file ---
mango_file = "Hudep2_D0_H3K27AC_HiChIP_FS.interactions.all.mango"
import sys
input_query_bed = sys.argv[1]  # query BED file supplied on the command line
df = read_bedpe(mango_file)
# Anchor ids in "chrom-start-end" form for the two loop endpoints.
df['source'] = df[0]+"-"+df[1].astype(str)+"-"+df[2].astype(str)
df['target'] = df[3]+"-"+df[4].astype(str)+"-"+df[5].astype(str)
print (df.head())
# Build an undirected anchor graph; node degree = number of contacts.
g=nx.from_pandas_edgelist(df, 'source', 'target')
my_dict = g.degree
# print (my_dict)
# Table of per-anchor contact counts, indexed by anchor coordinates.
df1 = [[x[0],x[1]] for x in g.degree]
df1 = pd.DataFrame(df1)
df1.columns=['Genomic coordinates','Number of contacts']
df1 = df1.set_index('Genomic coordinates')
print (df1.head())
mango2bed(df)
## overlap query bed with mango bed
import os
# NOTE(review): requires bedtools on PATH; consider subprocess.run instead.
os.system("bedtools intersect -a %s -b mango.bed -wao > intersect.bed"%(input_query_bed))
df2 = read_bed("intersect.bed")
# Map each overlapping anchor name to its contact degree; 0 when no overlap.
df2[df2.columns[-2]] = df2[df2.columns[-2]].map(df1['Number of contacts'].to_dict())
df2 = df2.fillna(0)
df2.to_csv("%s_degree.csv"%(input_query_bed))
|
import tensorflow as tf
import os
import sys
import pickle
import I2S_Model_Transformer
import selfies
import numpy as np
import argparse
import efficientnet.tfkeras as efn
parser = argparse.ArgumentParser()
parser.add_argument('file', nargs='+')  # one or more input list files
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"]="0"
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    # Allocate GPU memory on demand instead of reserving it all up front.
    tf.config.experimental.set_memory_growth(gpu, True)
print(args)
for file_in in args.file:
    # Redirect stdout so every prediction for this input file lands in a
    # dedicated "Preds_<file>.txt" report.
    f = open('Preds_'+str(file_in)+'.txt' , 'w')
    sys.stdout = f
    tokenizer = pickle.load(open("tokenizer_Isomeric_SMILES.pkl","rb"))
    maxlength = pickle.load(open("max_length_Isomeric_SMILES.pkl","rb"))
    target_vocab_size = len(tokenizer.word_index)
    # Transformer hyper-parameters (decoder depth/width and image grid size).
    num_layer = 4
    d_model = 512
    dff = 2048
    num_heads = 8
    row_size = 10
    col_size = 10
    dropout_rate = 0.1
    optimizer = tf.keras.optimizers.Adam(learning_rate=0.00051)
    transformer = I2S_Model_Transformer.Transformer(num_layer,d_model,num_heads,dff,row_size,col_size,target_vocab_size,max_pos_encoding=target_vocab_size,rate=dropout_rate)
    target_size=(300,300,3)
def load_image(image_path):
img = tf.io.read_file(image_path)
img = tf.image.decode_png(img, channels=3)
img = tf.image.resize(img, (300, 300))
img = efn.preprocess_input(img)
return img, image_path
#Using EfficientnetB3 and using the pretrained Imagenet weights
image_model = efn.EfficientNetB3(weights='noisy-student',input_shape=target_size, include_top=False)
new_input = image_model.input
hidden_layer = image_model.layers[-1].output
image_features_extract_model = tf.keras.Model(new_input, hidden_layer)
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
# add extra dimensions to add the padding
# to the attention logits.
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
    def create_look_ahead_mask(size):
        # Upper-triangular mask so position i cannot attend to positions j > i.
        mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
        return mask  # (seq_len, seq_len)
def create_masks_decoder(tar):
look_ahead_mask = create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = create_padding_mask(tar)
combined_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)
return combined_mask
    # Evaluator: greedy autoregressive decoding.
    def evaluate(image):
        """Greedy-decode a token sequence (SMILES/SELFIES) for one image path."""
        temp_input = tf.expand_dims(load_image(image)[0], 0)
        img_tensor_val = image_features_extract_model(temp_input)
        # Flatten the spatial grid into a sequence of feature vectors.
        img_tensor_val = tf.reshape(img_tensor_val, (img_tensor_val.shape[0], -1, img_tensor_val.shape[3]))
        output = tf.expand_dims([tokenizer.word_index['<start>']], 0)
        result = []
        end_token = tokenizer.word_index['<end>']
        for i in range(maxlength):
            dec_mask = create_masks_decoder(output)
            predictions, attention_weights = transformer(img_tensor_val,output,False,dec_mask)
            predictions = predictions[: ,-1:, :] # (batch_size, 1, vocab_size)
            # Greedy choice: most probable next token.
            predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
            if predicted_id == end_token:
                return result#,tf.squeeze(output, axis=0), attention_weights
            result.append(tokenizer.index_word[int(predicted_id)])
            # Feed the chosen token back in for the next decoding step.
            output = tf.concat([output, predicted_id], axis=-1)
        return result#,tf.squeeze(output, axis=0), attention_weights
    # Restore the latest checkpoint; the optimizer is tracked only so the
    # checkpoint structure matches training.
    checkpoint_path = "SMILES"
    ckpt = tf.train.Checkpoint(transformer=transformer,optimizer=optimizer)
    ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=50)
    if ckpt_manager.latest_checkpoint:
        ckpt.restore(tf.train.latest_checkpoint(checkpoint_path))
    # Predicting Smiles on the validation set
    with open(str(file_in), 'r') as txt_file:
        for i,line in enumerate(txt_file):
            # Each line: "<image-id>\t<tokenized caption>".
            img = line.strip().split("\t")[0]
            cap = line.strip().split("\t")[1]
            image = 'PubChem_images/'+img+'.png'
            real_caption = ''.join(cap)
            result = evaluate(image)
            print (real_caption.replace(" ","").replace("<start>","").replace("<end>",""),'\tOriginalSmiles', flush=True)
            print (''.join(result).replace("<start>","").replace("<end>",""),'\tPredictedSmiles', flush=True)
    #print("Predictions Completed!")
    f.close()
"""
Python support for notebook I/O on Tiledb Cloud. All notebook JSON content
is assumed to be encoded as UTF-8.
"""
import posixpath
import time
from typing import Optional, Tuple
import numpy
import tiledb
from tiledb.cloud import array
from tiledb.cloud import client
from tiledb.cloud import rest_api
from tiledb.cloud import tiledb_cloud_error
from tiledb.cloud.rest_api import ApiException as GenApiException
from tiledb.cloud.rest_api import rest
RESERVED_NAMESPACES = frozenset(["cloud", "owned", "public", "shared"])
CHARACTER_ENCODING = "utf-8"
def rename_notebook(
    tiledb_uri,
    notebook_name=None,
    access_credentials_name=None,
    async_req=False,
):
    """
    Update an array's info

    :param str tiledb_uri: such as "tiledb://TileDB-Inc/quickstart_dense".
    :param str notebook_name: such as "quickstart_dense_new_name".
    :param str access_credentials_name: optional name of access credentials to
      use. If left blank, the default for the namespace will be used.
    :param bool async_req: return future instead of results for async support.
    """
    namespace, current_name = array.split_uri(tiledb_uri)
    metadata = rest_api.models.ArrayInfoUpdate(
        name=notebook_name,
        uri=tiledb_uri,
        access_credentials_name=access_credentials_name,
    )
    try:
        return client.client.notebook_api.update_notebook_name(
            namespace=namespace,
            array=current_name,
            notebook_metadata=metadata,
            async_req=async_req,
        )
    except GenApiException as exc:
        raise tiledb_cloud_error.check_exc(exc) from None
def download_notebook_to_file(
    tiledb_uri: str,
    ipynb_file_name: str,
) -> None:
    """
    Downloads a notebook file from TileDB Cloud to local disk.

    :param tiledb_uri: such as "tiledb://TileDB-Inc/quickstart_dense".
    :param ipynb_file_name: path to save to, such as "./mycopy.ipynb". Must be
      local; no S3 URI support at present.
    """
    contents = download_notebook_contents(tiledb_uri)
    vfs = tiledb.VFS(tiledb.cloud.Ctx().config())
    with tiledb.FileIO(vfs, ipynb_file_name, mode="wb") as fio:
        fio.write(contents.encode(CHARACTER_ENCODING))
def download_notebook_contents(
    tiledb_uri: str,
) -> str:
    """
    Downloads a notebook file from TileDB Cloud to contents as a string,
    nominally in JSON format.

    :param tiledb_uri: such as "tiledb://TileDB-Inc/quickstart_dense".
    :return: contents of the notebook file as a string, nominally in JSON format.
    """
    ctx = tiledb.cloud.Ctx({})
    with tiledb.open(tiledb_uri, "r", ctx=ctx) as arr:
        # "file_size" metadata records how many bytes of the dense array
        # actually hold notebook content.
        nbytes = arr.meta["file_size"]
        cells = arr.query(attrs=["contents"])[slice(0, nbytes)]
    return cells["contents"].tobytes().decode(CHARACTER_ENCODING)
# TODO: auto-increment/overwrite logic
# If the destination array name already exists -- e.g. uploading 'foo.ipynb' to
# 'testing-upload' -- there are three options:
# 1. Fail the upload with 'already exists' and require the user to supply a
# different path. No clobbering
# 2. Auto-increment the array name, e.g. from 'testing-upload' to 'testing-upload-1'
# and then 'testing-upload-2' the next time, and so on.
# 3. Overwrite
#
# Thoughts:
# * Option 3 isn't a safe default -- for those who want it it's fine but for
# those who don't it can be seen as unwelcome data loss.
# * Option 2 is a not-bad default -- there is no data loss, but some users
#   might be left feeling 'Why are you creating all these versions? I just
#   want to update one notebook, not have twenty copies.'
# * Option 1 is a safe default -- there is no data loss and no profusion of
# copies. However, it is more frictional for the user, requiring them to
# make the decision.
#
# Implementation:
#
# * We could have a force-overwrite argument, optional, default False.
# * We could have a behavior-on-exist argument, of enum type, 3 cases, one
# for each of the options above.
#
# Status: As of this writing: we have implemented option 1, and we don't have
# an overwrite/update-in-place flag.
def upload_notebook_from_file(
    ipynb_file_name: str,
    namespace: str,
    array_name: str,
    storage_path: Optional[str],
    storage_credential_name: Optional[str],
) -> str:
    """
    Uploads a local-disk notebook file to TileDB Cloud.

    :param ipynb_file_name: such as "./mycopy.ipynb". Must be local; no S3 URI
      support at present.
    :param namespace: such as "janedoe".
    :param array_name: name to be seen in the UI, such as "testing-upload".
    :param storage_path: such as "s3://acmecorp-janedoe", typically from the
      user's account settings.
    :param storage_credential_name: such as "janedoe-creds", typically from the
      user's account settings.
    :return: TileDB array name, such as "tiledb://janedoe/testing-upload".
    """
    vfs = tiledb.VFS(tiledb.cloud.Ctx().config())
    with tiledb.FileIO(vfs, ipynb_file_name, mode="rb") as fio:
        ipynb_file_contents = fio.read()
    # BUG FIX: the file is opened in binary mode, so read() yields bytes;
    # the original str(bytes) produced a "b'...'" repr string, corrupting
    # the uploaded notebook. Decode explicitly (module assumes UTF-8).
    return upload_notebook_contents(
        ipynb_file_contents.decode(CHARACTER_ENCODING),
        storage_path,
        array_name,
        namespace,
        storage_credential_name,
    )
def upload_notebook_contents(
    ipynb_file_contents: str,
    storage_path: Optional[str],
    array_name: str,
    namespace: str,
    storage_credential_name: Optional[str],
) -> str:
    """
    Uploads a notebook file to TileDB Cloud.

    :param ipynb_file_contents: The contents of the notebook file as a string,
      nominally in JSON format.
    :param storage_path: such as "s3://acmecorp-janedoe", typically from the
      user's account settings.
    :param array_name: name to be seen in the UI, such as "testing-upload"
    :param namespace: such as "janedoe".
    :param storage_credential_name: such as "janedoe-creds", typically from the
      user's account settings.
    :return: TileDB array name, such as "tiledb://janedoe/testing-upload".
    :raises tiledb_cloud_error.TileDBCloudError: when neither the arguments
      nor the user's account settings supply credentials / a storage path.
    """
    if storage_credential_name is None:
        storage_credential_name = (
            tiledb.cloud.user_profile().default_s3_path_credentials_name
        )
    if storage_path is None:
        storage_path = tiledb.cloud.user_profile().default_s3_path
    # BUG FIX: these raises previously ended with "from e", but no name "e"
    # exists in this scope, so hitting either branch raised a NameError
    # instead of the intended error. (Also dropped the pointless f-prefixes.)
    if storage_credential_name is None:
        raise tiledb_cloud_error.TileDBCloudError(
            "No storage credentials found in account. Please add them there, or pass them in explicitly here."
        )
    if storage_path is None:
        raise tiledb_cloud_error.TileDBCloudError(
            "No storage path found in account. Please add it there, or pass it in explicitly here."
        )
    ctx = tiledb.cloud.Ctx(
        {"rest.creation_access_credentials_name": storage_credential_name}
    )
    tiledb_uri, array_name = _create_notebook_array(
        storage_path,
        array_name,
        namespace,
        ctx,
    )
    _write_notebook_to_array(tiledb_uri, ipynb_file_contents, ctx)
    return tiledb_uri
def _create_notebook_array(
    storage_path: str,
    array_name: str,
    namespace: str,
    ctx: tiledb.Ctx,
    *,
    retries: int = 0,
) -> Tuple[str, str]:
    """
    Creates a new array for storing a notebook file.

    :param storage_path: such as "s3://acmecorp-janedoe", typically from the
      user's account settings.
    :param array_name: name to be seen in the UI, such as "testing-upload"
    :param namespace: such as "janedoe".
    :param ctx: cloud context for the operation.
    :param retries: additional attempts to make on retryable TileDB errors
      (credential and already-exists errors are never retried).
    :return: tuple of tiledb_uri and array_name
    :raises ValueError: if ``namespace`` is a reserved pseudo-namespace.
    :raises tiledb_cloud_error.TileDBCloudError: on credential errors, name
      collisions, or after the attempt budget is exhausted.
    """
    if namespace in RESERVED_NAMESPACES:
        raise ValueError(
            f"{namespace!r} is not a valid folder to create notebooks. "
            "Please select a proper namespace (username or organization name).",
        )
    # The array will be 1-dimensional with domain of 0 to max uint64. We
    # use a tile extent of 1024 bytes.
    dom = tiledb.Domain(
        tiledb.Dim(
            name="position",
            domain=(0, numpy.iinfo(numpy.uint64).max - 1025),
            tile=1024,
            dtype=numpy.uint64,
            ctx=ctx,
            filters=tiledb.FilterList([tiledb.ZstdFilter()]),
        ),
        ctx=ctx,
    )
    tries = 1 + retries  # 1st + rest
    while True:
        try:
            tiledb_uri, array_name = _create_notebook_array_retry_helper(
                storage_path,
                array_name,
                namespace,
                dom,
                ctx,
            )
            return (tiledb_uri, array_name)
        except tiledb.TileDBError as e:
            if "Error while listing with prefix" in str(e):
                # It is possible to land here if user sets wrong default S3
                # credentials with respect to default S3 path.
                raise tiledb_cloud_error.TileDBCloudError(
                    f"Error creating file: {e}. Are your credentials valid?"
                ) from e
            if "Cannot create array" in str(e) and "already exists" in str(e):
                raise tiledb_cloud_error.TileDBCloudError(
                    f"Error creating file: {array_name!r} already exists in namespace {namespace!r}."
                )
            # Retry other TileDB errors until the attempt budget runs out.
            tries -= 1
            if tries <= 0:
                raise tiledb_cloud_error.check_exc(e) from None
def _create_notebook_array_retry_helper(
    storage_path: str,
    array_name: str,
    namespace: str,
    dom: tiledb.Domain,
    ctx: tiledb.Ctx,
) -> Tuple[str, str]:
    """
    See _create_notebook_array -- exists only for retry logic.

    :return: tuple of tiledb_uri and array_name
    """
    # FIX: the annotation/docstring previously advertised a 3-tuple
    # (succeeded, tiledb_uri, array_name), but the function returns a 2-tuple.
    schema = tiledb.ArraySchema(
        domain=dom,
        sparse=False,
        attrs=[
            tiledb.Attr(
                name="contents",
                dtype=numpy.uint8,
                filters=tiledb.FilterList([tiledb.ZstdFilter()]),
            )
        ],
        ctx=ctx,
    )
    # Goal: tiledb://my_username/s3://my_bucket/my_array
    # https://docs.tiledb.com/cloud/how-to/arrays/create-arrays
    tiledb_uri_s3 = "tiledb://" + posixpath.join(namespace, storage_path, array_name)
    # Create the (empty) array on disk.
    tiledb.Array.create(tiledb_uri_s3, schema)
    tiledb_uri = "tiledb://" + posixpath.join(namespace, array_name)
    # Brief pause so the creation is visible to the subsequent info update.
    time.sleep(0.25)
    file_properties = {}
    array.update_info(uri=tiledb_uri, array_name=array_name)
    array.update_file_properties(
        uri=tiledb_uri,
        file_type=tiledb.cloud.rest_api.models.FileType.NOTEBOOK,
        # If file_properties is empty, don't send anything at all.
        file_properties=file_properties or None,
    )
    return tiledb_uri, array_name
def _write_notebook_to_array(
    tiledb_uri: str,
    ipynb_file_contents: str,
    ctx: tiledb.Ctx,
) -> None:
    """Writes the given bytes to the array.

    :param tiledb_uri: such as "tiledb://TileDB-Inc/quickstart_dense".
    :param ipynb_file_contents: The contents of the notebook file as a string,
      nominally in JSON format.
    :param ctx: cloud context for the operation.
    """
    # Note: every array is opened at a particular timestamp. Data and metadata
    # writes are separate: write of metadata doesn't happen until the array is
    # closed. But in Python, it's in a "with" context so data and metadata
    # writes get the same timestamp.

    # Why character-encoding is needed: in Python, len("Doppelgänger") is 12
    # but len(bytes("Doppelgänger", "utf-8")) is 13. We store the file
    # contents as an array of bytes, so we need the encoding to get the right
    # byte-count for the file-contents string.
    contents_as_array = numpy.array(bytearray(ipynb_file_contents, CHARACTER_ENCODING))
    with tiledb.open(tiledb_uri, mode="w", ctx=ctx) as arr:
        arr[0 : len(contents_as_array)] = {"contents": contents_as_array}
        arr.meta["file_size"] = len(contents_as_array)
        # BUG FIX: was ``arr.meta["type"] = file_type = ...`` -- a stray
        # chained assignment that created an unused local name.
        arr.meta["type"] = tiledb.cloud.rest_api.models.FileType.NOTEBOOK
        arr.meta["format"] = "json"
|
#!/usr/bin/evn python
# -*- coding: utf-8 -*-
# python version 2.7.6
import gzip,shutil,os
def gzipCompress(fileName):
    """Gzip-compress *fileName*, writing the result to "<fileName>.gz"."""
    # Name of the compressed output file.
    tarFileName = fileName + ".gz"
    with open(fileName, 'rb') as f_in:
        with gzip.open(tarFileName, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
def gzipUnCompress(tarFileName):
    """Decompress a ".gz" file, writing to the same path without the suffix."""
    fileName = os.path.splitext(tarFileName)[0]
    # FIX: the Python-2-only "print fileName" statement is replaced with the
    # function form, which works on both Python 2 and 3.
    print(fileName)
    with gzip.open(tarFileName, 'rb') as f_in,open(fileName,'wb') as f_out:
        shutil.copyfileobj(f_in, f_out)
#if __name__ == '__main__':
#gzipCompress("/Users/mac/Documents/data/1.fastq")
#gzipUnCompress("/Users/mac/Documents/data/4.fastq.gz") |
# flake8: noqa
import unittest

from tests.settings.arguments import ArgumentsParserTestCase

# Entry point: run the imported test case(s) via unittest's CLI runner.
if __name__ == '__main__':
    unittest.main()
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import routes
from senlin.api.openstack.v1 import actions
from senlin.api.openstack.v1 import build_info
from senlin.api.openstack.v1 import cluster_policies
from senlin.api.openstack.v1 import clusters
from senlin.api.openstack.v1 import events
from senlin.api.openstack.v1 import nodes
from senlin.api.openstack.v1 import policies
from senlin.api.openstack.v1 import policy_types
from senlin.api.openstack.v1 import profile_types
from senlin.api.openstack.v1 import profiles
from senlin.common import wsgi
class API(wsgi.Router):
    """WSGI router for Cluster v1 ReST API requests."""

    @staticmethod
    def _connect(sub_mapper, route_specs):
        """Register (name, path, action, http_method) tuples on a submapper.

        Factors out the repetitive ``sub_mapper.connect(...)`` boilerplate
        that previously appeared once per route.
        """
        for name, path, action, method in route_specs:
            sub_mapper.connect(name, path,
                               action=action,
                               conditions={'method': method})

    def __init__(self, conf, **local_conf):
        self.conf = conf
        mapper = routes.Mapper()

        # Profile_types
        profile_types_resource = profile_types.create_resource(conf)
        with mapper.submapper(controller=profile_types_resource,
                              path_prefix="/{tenant_id}") as sub_mapper:
            self._connect(sub_mapper, [
                ("profile_type_index", "/profile_types", "index", 'GET'),
                ("profile_type_schema", "/profile_types/{type_name}",
                 "schema", 'GET'),
            ])

        # Profiles
        profiles_resource = profiles.create_resource(conf)
        with mapper.submapper(controller=profiles_resource,
                              path_prefix="/{tenant_id}") as sub_mapper:
            self._connect(sub_mapper, [
                ("profile_index", "/profiles", "index", 'GET'),
                ("profile_create", "/profiles", "create", 'POST'),
                ("profile_get", "/profiles/{profile_id}", "get", 'GET'),
                ("profile_update", "/profiles/{profile_id}",
                 "update", 'PATCH'),
                ("profile_delete", "/profiles/{profile_id}",
                 "delete", 'DELETE'),
            ])

        # Policy Types
        policy_types_resource = policy_types.create_resource(conf)
        with mapper.submapper(controller=policy_types_resource,
                              path_prefix="/{tenant_id}") as sub_mapper:
            self._connect(sub_mapper, [
                ("policy_type_index", "/policy_types", "index", 'GET'),
                ("policy_type_schema", "/policy_types/{type_name}",
                 "schema", 'GET'),
            ])

        # Policies
        policies_resource = policies.create_resource(conf)
        with mapper.submapper(controller=policies_resource,
                              path_prefix="/{tenant_id}") as sub_mapper:
            self._connect(sub_mapper, [
                ("policy_index", "/policies", "index", 'GET'),
                ("policy_create", "/policies", "create", 'POST'),
                ("policy_get", "/policies/{policy_id}", "get", 'GET'),
                ("policy_update", "/policies/{policy_id}",
                 "update", 'PATCH'),
                ("policy_delete", "/policies/{policy_id}",
                 "delete", 'DELETE'),
            ])

        # Clusters
        clusters_resource = clusters.create_resource(conf)
        with mapper.submapper(controller=clusters_resource,
                              path_prefix="/{tenant_id}") as sub_mapper:
            self._connect(sub_mapper, [
                ("cluster_index", "/clusters", "index", 'GET'),
                ("cluster_create", "/clusters", "create", 'POST'),
                ("cluster_get", "/clusters/{cluster_id}", "get", 'GET'),
                ("cluster_update", "/clusters/{cluster_id}",
                 "update", 'PATCH'),
                ("cluster_action", "/clusters/{cluster_id}/action",
                 "action", 'PUT'),
                ("cluster_delete", "/clusters/{cluster_id}",
                 "delete", 'DELETE'),
            ])

        # Nodes
        nodes_resource = nodes.create_resource(conf)
        with mapper.submapper(controller=nodes_resource,
                              path_prefix="/{tenant_id}") as sub_mapper:
            self._connect(sub_mapper, [
                ("node_index", "/nodes", "index", 'GET'),
                ("node_create", "/nodes", "create", 'POST'),
                ("node_get", "/nodes/{node_id}", "get", 'GET'),
                ("node_update", "/nodes/{node_id}", "update", 'PATCH'),
                ("node_action", "/nodes/{node_id}/action", "action", 'PUT'),
                ("node_delete", "/nodes/{node_id}", "delete", 'DELETE'),
            ])

        # Cluster Policies
        cluster_policies_resource = cluster_policies.create_resource(conf)
        policies_path = "/{tenant_id}/clusters/{cluster_id}"
        with mapper.submapper(controller=cluster_policies_resource,
                              path_prefix=policies_path) as sub_mapper:
            self._connect(sub_mapper, [
                ("cluster_policy_list", "/policies", "index", 'GET'),
                ("cluster_policy_show", "/policies/{policy_id}",
                 "get", 'GET'),
            ])

        # Actions
        actions_resource = actions.create_resource(conf)
        with mapper.submapper(controller=actions_resource,
                              path_prefix="/{tenant_id}") as sub_mapper:
            self._connect(sub_mapper, [
                ("action_index", "/actions", "index", 'GET'),
                ("action_create", "/actions", "create", 'POST'),
                ("action_get", "/actions/{action_id}", "get", 'GET'),
            ])

        # Events
        events_resource = events.create_resource(conf)
        with mapper.submapper(controller=events_resource,
                              path_prefix="/{tenant_id}") as sub_mapper:
            self._connect(sub_mapper, [
                ("event_index", "/events", "index", 'GET'),
                ("event_get", "/events/{event_id}", "get", 'GET'),
            ])

        # Info
        info_resource = build_info.create_resource(conf)
        with mapper.submapper(controller=info_resource,
                              path_prefix="/{tenant_id}") as sub_mapper:
            self._connect(sub_mapper, [
                ("build_info", "/build_info", "build_info", 'GET'),
            ])

        super(API, self).__init__(mapper)
|
import bpy
from bpy.props import *
from ..BASE.node_tree import RenderStackNode
from ...utility import *
def update_node(self, context):
    """Property-update callback: re-run the active viewer task node.

    Skipped when this variants entry is not marked for use.
    """
    if not self.use: return None
    task_node = context.space_data.node_tree.nodes.get(bpy.context.window_manager.rsn_viewer_node)
    if task_node: task_node.update()
class VariantsNodeProperty(bpy.types.PropertyGroup):
    """Per-variants-node settings stored on the Set Variants node."""

    # Name of the tracked RSNodeVariantsNode.
    name: StringProperty(name="The name of the variants node")
    # Which input of the variants node is considered active; triggers an
    # update of the viewer task node when changed.
    active: IntProperty(default=0, min=0, name="Active Input",update=update_node)
    # Whether this entry is applied during rendering.
    use: BoolProperty(default=True, name="Use for render",
                      description="If enable, the active input of the variant node will be apply to the Scene,else it will apply the last input of the variant node")
# use uilist for visualization
class RSN_UL_VarCollectNodeList(bpy.types.UIList):
    """UIList row: node name on the left, active-index and use toggle right."""

    def draw_item(self, context, layout, data, item, icon, active_data, active_propname, index):
        sub = layout.split(align=True, factor=0.5)
        sub.label(text=item.name, icon="RADIOBUT_OFF")
        row = sub.row()
        row.prop(item, "active", text="Active")
        row.prop(item, "use", text="", icon="CHECKMARK")
class RSN_OT_UpdateVarCollect(bpy.types.Operator):
    """ADD/REMOVE List item"""
    bl_idname = "rsn.update_var_collect"
    bl_label = "Update Collect"

    # Which edit to perform on the collection.
    action: EnumProperty(name="Edit", items=[('ADD', 'Add', ''), ('REMOVE', 'Remove', '')])
    sort: BoolProperty(name="Sort", description="Sort when update collect", default=True)
    node_name: StringProperty(default='')

    node = None  # resolved Set Variants node, assigned in execute()

    def execute(self, context):
        self.node = context.space_data.edit_tree.nodes[self.node_name]
        if self.action == "ADD":
            self.get_var_nodes()
            self.node.node_collect_index = len(self.node.node_collect) - 1
        if self.sort:
            self.sort_items()
        return {"FINISHED"}

    def get_var_nodes(self):
        """Sync node_collect with the variants nodes reachable below the node."""
        nt = bpy.context.space_data.edit_tree
        RSN = RSN_Nodes(node_tree=nt, root_node_name=self.node.name)
        nodes = RSN.get_children_from_node(root_node=self.node)
        node_list = ','.join(
            [node_name for node_name in nodes if nt.nodes[node_name].bl_idname == "RSNodeVariantsNode"])
        # Drop collection entries whose variants node is no longer connected.
        for i, src_node in enumerate(self.node.node_collect.keys()):
            if src_node not in node_list.split(','):
                self.node.node_collect.remove(i)
                self.node.node_collect_index -= 1 if self.node.node_collect_index != 0 else 0
        # Add entries for newly connected variants nodes.
        for node_name in node_list.split(','):
            if node_name != '' and node_name not in self.node.node_collect.keys():
                prop = self.node.node_collect.add()
                prop.name = node_name
                prop.active = 0

    def sort_items(self):
        """Rebuild node_collect sorted by node name."""
        item_list = [{"name": k, "value": v.active} for k, v in self.node.node_collect.items()]
        sort_list = sorted(item_list, key=lambda x: x["name"])
        self.node.node_collect.clear()
        # BUG FIX: the original re-added items from the *unsorted* item_list
        # (item_list[i]), so sorting had no effect; use the sorted entries.
        for item in sort_list:
            prop = self.node.node_collect.add()
            prop.name = item["name"]
            prop.active = item["value"]
class RSNodeSetVariantsNode(RenderStackNode):
    """A simple input node"""
    bl_idname = 'RSNodeSetVariantsNode'
    bl_label = 'Set Variants'

    node_list = None  # NOTE(review): appears unused here -- confirm before removing

    # Collection of tracked variants nodes and their active-input settings.
    node_collect: CollectionProperty(name="Node Property", type=VariantsNodeProperty)
    node_collect_index: IntProperty(default=0)
    sort: BoolProperty(name="Sort", description="Sort when update collect", default=True)

    def init(self, context):
        # One settings input, one settings output.
        self.width = 220
        self.inputs.new('RSNodeSocketTaskSettings', "Input")
        self.outputs.new('RSNodeSocketTaskSettings', "Output")

    def draw_buttons(self, context, layout):
        # List of tracked variants nodes plus a refresh/sort control row.
        row = layout.row(align=1)
        row.template_list(
            "RSN_UL_VarCollectNodeList", "The list",
            self, "node_collect",
            self, "node_collect_index", )
        row = layout.row(align=1)
        edit = row.operator("rsn.update_var_collect", icon="FILE_REFRESH")
        edit.action = "ADD"
        edit.node_name = self.name
        edit.sort = self.sort
        row.prop(self, "sort", icon='SORTSIZE', text='')

    def get_data(self):
        # Intentionally a no-op; the previous behavior is kept for reference.
        pass
        # for item in self.node_collect:
        #     if item.use:
        #         node = bpy.context.space_data.edit_tree.nodes[item.name]
        #         if node.active != item.active: node.active = item.active
def register():
    """Register every class this module contributes to Blender."""
    for cls in (VariantsNodeProperty,
                RSN_UL_VarCollectNodeList,
                RSN_OT_UpdateVarCollect,
                RSNodeSetVariantsNode):
        bpy.utils.register_class(cls)
def unregister():
    """Unregister this module's classes (same order as register())."""
    for cls in (VariantsNodeProperty,
                RSN_UL_VarCollectNodeList,
                RSN_OT_UpdateVarCollect,
                RSNodeSetVariantsNode):
        bpy.utils.unregister_class(cls)
|
"""
w1thermsensor
~~~~~~~~~~~~~
A Python package and CLI tool to work with w1 temperature sensors.
:copyright: (c) 2020 by Timo Furrer <tuxtimo@gmail.com>
:license: MIT, see LICENSE for more details.
"""
import os
import time
import pytest
from w1thermsensor.errors import KernelModuleLoadError
from w1thermsensor.kernel import load_kernel_modules
def test_kernel_module_load_error(monkeypatch):
    """Test exception if kernel modules cannot be loaded"""
    # given: modprobe "succeeds" but the sysfs directory never appears,
    # and sleeping is a no-op so the retry loop finishes instantly.
    monkeypatch.setattr(os, "system", lambda x: True)
    monkeypatch.setattr(os.path, "isdir", lambda x: False)
    monkeypatch.setattr(time, "sleep", lambda x: True)

    # when & then
    expected_error_msg = "Cannot load w1 therm kernel modules"
    with pytest.raises(KernelModuleLoadError, match=expected_error_msg):
        load_kernel_modules()
|
#!/usr/bin/env python3
from clib.mininet_test_util import get_serialno
from clib.mininet_test_topo import FaucetSwitchTopo, SWITCH_START_PORT
class FaucetTopoGenerator(FaucetSwitchTopo):
    """
    Generate a Faucet topology for use in the mininet integration tests
    FaucetTopoGenerator is able to connect up a network in an arbitrary
    topology based off a dp_link, host_link and host_vlan dictionary
    """

    @staticmethod
    def dp_links_networkx_graph(graph, offset=0, n_dp_links=1):
        """
        Convert a networkx graph's edge list into a dp_links dictionary
        Args:
            graph: Networkx graph
            offset: DP offset
            n_dp_links: Redundant switch-switch links
        Return dp_links dict: source DP index -> list of destination DP indices
        """
        dp_links = {}
        for edge in graph.edges():
            src = edge[0] + offset
            dst = edge[1] + offset
            if src not in dp_links:
                dp_links[src] = []
            # Repeat the destination once per redundant link
            for _ in range(n_dp_links):
                dp_links[src].append(dst)
        return dp_links

    @staticmethod
    def tagged_untagged_hosts(n_dps, n_tagged, n_untagged,
                              n_host_links=1, dp_offset=0, host_offset=0):
        """
        Generate links & vlans for a number of tagged and untagged vlan hosts on each dp
        Args:
            n_dps: Number of DPs to generate hosts on
            n_tagged: Number of tagged hosts to generate on each DP
            n_untagged: Number of untagged hosts to generate on each DP
            n_host_links: Number of redundant host to switch links
            dp_offset: DP index offset
            host_offset: Host index offset
        Return host_links, host_vlans
        """
        host_links = {}
        host_vlans = {}
        vlan = 0  # every host is placed on vlan index 0
        host_id = host_offset
        for i in range(n_dps):
            for _ in range(n_tagged):
                host_links[host_id] = []
                for _ in range(n_host_links):
                    host_links[host_id].append(i + dp_offset)
                # Tagged membership is expressed as a tuple of vlan indices
                host_vlans[host_id] = (vlan,)
                host_id += 1
            for _ in range(n_untagged):
                host_links[host_id] = []
                for _ in range(n_host_links):
                    host_links[host_id].append(i + dp_offset)
                # Untagged membership is a bare vlan index
                host_vlans[host_id] = vlan
                host_id += 1
        return host_links, host_vlans

    @staticmethod
    def tagged_vlan_hosts(n_dps, vlan, n_host_links=1, dp_offset=0, host_offset=0):
        """
        Generate dictionaries for a single tagged host on each DP
        Args:
            n_dps: Number of DPs to generate hosts on
            vlan: The host's tagged VLAN
            n_host_links: Number of redundant links
            dp_offset: DP index offset
            host_offset: Host index offset
        Return host_links, host_vlans
        """
        host_links = {}
        host_vlans = {}
        host_id = host_offset
        for i in range(n_dps):
            host_links[host_id] = []
            for _ in range(n_host_links):
                host_links[host_id].append(i + dp_offset)
            # One-element tuple marks the host as tagged on that vlan
            host_vlans[host_id] = (vlan, )
            host_id += 1
        return host_links, host_vlans

    @staticmethod
    def untagged_vlan_hosts(n_dps, n_vlans, n_host_links=1, dp_offset=0, host_offset=0):
        """
        Generate dictionaries for an untagged host on each vlan on each DP
        Args:
            n_dps: Number of DPs to generate hosts on
            n_vlans: Number of vlans to generate hosts on
            n_host_links: Number of redundant links
            dp_offset: DP index offset
            host_offset: Host index offset
        Return host_links, host_vlans
        """
        host_links = {}
        host_vlans = {}
        host_id = host_offset
        for i in range(n_dps):
            for vlan in range(n_vlans):
                host_links[host_id] = []
                for _ in range(n_host_links):
                    host_links[host_id].append(i + dp_offset)
                host_vlans[host_id] = vlan
                host_id += 1
        return host_links, host_vlans

    @staticmethod
    def untagged_vlan_hosts_by_amount(n_dps, n_vlan_hosts,
                                      n_host_links=1, dp_offset=0, host_offset=0):
        """
        Generate dictionaries for untagged hosts on each DP with specified number of hosts
        Args:
            n_dps: Number of DPs to generate hosts on
            n_vlan_hosts (dict): VLAN index to number of hosts on that VLAN on each DP
            n_host_links: Number of redundant host-switch links
            dp_offset: DP index offset
            host_offset: Host index offset
        Return host_links, host_vlans
        """
        host_links = {}
        host_vlans = {}
        host_id = host_offset
        for i in range(n_dps):
            for vlan, n_hosts in n_vlan_hosts.items():
                for _ in range(n_hosts):
                    host_links[host_id] = []
                    for _ in range(n_host_links):
                        host_links[host_id].append(i + dp_offset)
                    host_vlans[host_id] = vlan
                    host_id += 1
        return host_links, host_vlans

    def dpid_peer_links(self, dpid):
        """Return peer_link list for dpid, remapping if necessary"""
        name = self.dpid_names[dpid]
        links = [self.hw_remap_peer_link(dpid, link) for link in self.switch_peer_links[name]]
        return links

    def _add_host_to_switch_link(self, switch, dpid, host, curr_index):
        """
        Add a link from a switch to a host
        Args:
            switch: Switch
            dpid: Switch dpid
            host: Host
            curr_index: Port order index
        Returns the next free port order index.
        """
        self.switch_ports.setdefault(switch, [])
        self.dpid_port_host.setdefault(int(dpid), {})
        index = curr_index
        # Physical port number comes from the (possibly shuffled) port order
        port = self.start_port + self.port_order[index]
        self.addLink(switch, host, port1=port, delay=self.DELAY, use_htb=True)
        self.switch_ports[switch].append(port)
        self.dpid_port_host[int(dpid)][port] = host
        index += 1
        return index

    def _add_switch_to_switch_link(self, src, dst, next_index):
        """
        Add a redundant-capable link between two switches
        Args:
            src: Source switch
            dst: Dest switch
            next_index: Next port order index
        """
        self.switch_peer_links.setdefault(src, [])
        self.switch_peer_links.setdefault(dst, [])
        dpid1, dpid2 = self.switch_dpids[src], self.switch_dpids[dst]
        index1, index2 = next_index[src], next_index[dst]
        port1, port2 = [self.start_port + self.port_order[i] for i in (index1, index2)]
        self.addLink(src, dst, port1=port1, port2=port2)
        # Update port and link lists
        self.switch_ports.setdefault(src, [])
        self.switch_ports.setdefault(dst, [])
        self.switch_ports[src].append(port1)
        self.switch_ports[dst].append(port2)
        self.switch_peer_links[src].append(self.peer_link(port1, dpid2, port2))
        self.switch_peer_links[dst].append(self.peer_link(port2, dpid1, port1))
        # Update next indices on src and dest
        next_index[src] += 1
        next_index[dst] += 1

    def build(self, ovs_type, ports_sock, test_name, dpids,
              dp_links, host_links, host_vlans, vlan_vids,
              hw_dpid=None, switch_map=None, start_port=SWITCH_START_PORT,
              port_order=None, get_serialno=get_serialno):
        """
        Creates the Faucet mininet switches & hosts
        Args:
            dp_links (dict): dp id key to list of dp id value
            host_links (dict): host id key to list of dp id value
            host_vlans (dict): host id key to vlans id value
            vlan_vids (dict): VLAN IDs for vlan index
        """
        self.hw_dpid = hw_dpid
        self.hw_ports = sorted(switch_map) if switch_map else []
        self.start_port = start_port
        # Count all links so the port-order permutation can be sized;
        # each switch-switch link consumes a port on both ends.
        self.switch_to_switch_links = 0
        for dplinks in dp_links.values():
            self.switch_to_switch_links += len(dplinks)
        self.host_to_switch_links = 0
        for hostlinks in host_links.values():
            self.host_to_switch_links += len(hostlinks)
        max_ports = self.host_to_switch_links + (2 * self.switch_to_switch_links)
        self.port_order = self.extend_port_order(port_order, max_ports)
        # Create hosts
        self.hosts_by_id = {}
        for host_id, vlans in host_vlans.items():
            serialno = get_serialno(ports_sock, test_name)
            sid_prefix = self._get_sid_prefix(serialno)
            # int vlan value -> untagged host; tuple -> tagged on those vlans
            if isinstance(vlans, int):
                self.hosts_by_id[host_id] = self._add_untagged_host(sid_prefix, host_id)
            elif isinstance(vlans, tuple):
                self.hosts_by_id[host_id] = self._add_tagged_host(
                    sid_prefix, [vlan_vids[v] for v in vlans], host_id)
        # Create switches & then host-switch links
        self.switch_peer_links = {}
        next_index = {}
        self.dpid_to_switch = {}
        for i, dpid in enumerate(dpids):
            serialno = get_serialno(ports_sock, test_name)
            sid_prefix = self._get_sid_prefix(serialno)
            switch = self._add_faucet_switch(sid_prefix, dpid, hw_dpid, ovs_type)
            self.dpid_to_switch[dpid] = switch
            next_index[switch] = 0
            # Create host-switch links
            for host_id, hostlinks in host_links.items():
                if i in hostlinks:
                    n_links = hostlinks.count(i)
                    for _ in range(n_links):
                        host = self.hosts_by_id[host_id]
                        next_index[switch] = self._add_host_to_switch_link(
                            switch, dpid, host, next_index[switch])
        # Create switch-switch links
        for src_index, dplinks in dp_links.items():
            for dst_index in dplinks:
                src = self.dpid_to_switch[dpids[src_index]]
                dst = self.dpid_to_switch[dpids[dst_index]]
                self._add_switch_to_switch_link(src, dst, next_index)
|
"""A direct copy of a Google Spreadsheet to a postgresql database"""
import psycopg2
import pyiem.cscap_utils as util
from unidecode import unidecode
from six import string_types
config = util.get_config()
# Direct connection; no credentials are passed here, so authentication
# presumably relies on the server's host-based config -- TODO confirm.
pgconn = psycopg2.connect(
    database="sustainablecorn", host=config["database"]["host"]
)
ss = util.get_ssclient(config)
# Smartsheet report id -> destination postgres table name.
JOB_LISTING = [
    ["1292573529663364", "refereed_journals"],
    ["6868926064813956", "theses"],
    ["3644715322107780", "data_dictionary_export"],
    ["6669830439888772", "highvalue_notes"],
]
def cleaner(val):
    """Normalize a column title for use as a SQL column name.

    Lower-cases, converts spaces to underscores, drops parentheses,
    then turns slashes into spaces.
    """
    out = val.lower()
    out = out.replace(" ", "_")
    out = out.replace("(", "").replace(")", "")
    return out.replace("/", " ")
def workflow(sheetid, tablename):
    """Recreate *tablename* in postgres from the Smartsheet report *sheetid*.

    Drops any existing table, creates one varchar column per report column
    (titles sanitized via cleaner()), grants read access, then inserts every
    row keeping its original ordering in ss_order.
    NOTE(review): table and column names are interpolated directly into the
    SQL text; this is safe only while JOB_LISTING and sheet column titles
    remain trusted input.
    """
    cursor = pgconn.cursor()
    cursor.execute(f"DROP TABLE IF EXISTS {tablename}")
    sheet = ss.Reports.get_report(sheetid, page_size=1000)
    cols = []
    for col in sheet.columns:
        cols.append(cleaner(col.title))
    # All columns are created as varchar; ss_order preserves row order.
    cursor.execute(
        (
            """
    CREATE TABLE """
            + tablename
            + """ (ss_order int, %s)
    """
        )
        % (",".join([' "%s" varchar' % (s,) for s in cols]),)
    )
    cursor.execute(
        """
    GRANT SELECT on """
        + tablename
        + """ to nobody,apache
    """
    )
    for i, row in enumerate(sheet.rows):
        vals = []
        for cell in row.cells:
            val = cell.value
            # Transliterate unicode cell values to plain ASCII.
            if isinstance(val, string_types):
                val = unidecode(val)
            vals.append(val)
        # Cell values go through psycopg2 parameter binding (%s placeholders).
        sql = """
    INSERT into %s (ss_order, %s) VALUES (%s, %s)
    """ % (
            tablename,
            ",".join(['"%s"' % (s,) for s in cols]),
            i,
            ",".join(["%s"] * len(cols)),
        )
        cursor.execute(sql, vals)
    cursor.close()
    pgconn.commit()
def main():
    """Mirror every configured Smartsheet report into the database."""
    for sheetid, tablename in JOB_LISTING:
        workflow(sheetid, tablename)


if __name__ == "__main__":
    main()
|
import json
import os
from urllib.request import urlopen, Request
def sendToPushover(msg):
    """POST *msg* to the Pushover API and return the HTTP response object.

    Credentials come from the PushoverAppToken / PushoverUserToken
    environment variables (None if unset, which would make the call fail).
    NOTE(review): this posts a JSON body with a JSON content-type --
    confirm the Pushover endpoint accepts JSON rather than form encoding.
    """
    appToken = os.environ.get('PushoverAppToken')
    userKey = os.environ.get('PushoverUserToken')
    data = {"token": appToken, "user": userKey, "message": msg}
    rq = Request("https://api.pushover.net/1/messages.json",
                 data=json.dumps(data).encode("utf8"),
                 headers={'content-type': 'application/json'})
    return urlopen(rq)
def lambda_handler(event, context):
print(event)
resp = ''
if 'queryStringParameters' in event:
qsp = event['queryStringParameters']
if 'SmsSid' in qsp:
b = qsp["Body"]
f = qsp["From"]
msg = f"{f} says: '{b}'"
print(msg)
sendToPushover(msg)
forward = os.environ.get('ForwardTo')
if f == forward:
(r,v) = b.split(': ')
resp = f'<Message to="{r}">{v}</Message>'
else:
resp = f'<Message to="{forward}">{f}: {b}</Message>'
if 'CallSid' in qsp:
dir = qsp['Direction']
caller = qsp['Caller']
called = qsp['Called']
fr = qsp['From']
to = qsp['To']
twilioNumber = os.environ.get('TwilioNumber')
if 'SipCallId' in qsp:
callerId = twilioNumber
forward = to.strip('sip:').split('@')[0]
user = fr.strip('sip:').split('@')[0]
sendToPushover(f'Internal call from {user} to {forward}')
else:
callerId = fr
forward = os.environ.get('ForwardTo')
sendToPushover(f'Call from {fr} to {to}')
resp = f'<Dial callerId="{callerId}">{forward}</Dial>'
print(resp)
return { 'statusCode': 200
, 'body': f'<?xml version=\"1.0\" encoding=\"UTF-8\"?><Response>{resp}</Response>'
, 'headers': {"content-type": "text/xml"}
}
|
#!/usr/bin/env python
# Human-readable WAF product name (Chinese vendor name plus romanization).
NAME = '(安全宝)anquanbao'


def is_waf(self):
    """Detect Anquanbao by its X-Powered-By-Anquanbao response header."""
    return self.match_header(('X-Powered-By-Anquanbao', '.+'))
|
# OOP with Python
# Tech with Tim
class Dog(object):
    """A pet with a name and an age."""

    def __init__(self, name, age):
        # Instance attributes set at construction time.
        self.name = name
        self.age = age

    def speak(self):
        """Introduce this dog on stdout."""
        message = f"My name is {self.name}, and I am {self.age} years old."
        print(message)
# (Dog) is the parent of Cat
class Cat(Dog):
def __init__(self, name, age, color):
super().__init__(name, age)
self.color = color
def speak(self):
print(f"My name is {self.name}, and I am {self.color} cat.")
'''
wiggles = Cat('Wiggles', 5, 'Tabby')
wiggles.speak()
stella = Dog('Stella', 6)
stella.speak()
'''
##################
##################
##################
class Vehicle():
    """Base vehicle with a price, a fuel level and a color."""

    def __init__(self, price, gas, color):
        self.price = price
        self.gas = gas
        self.color = color

    def fillTank(self):
        """Fill the tank to its maximum level (100)."""
        self.gas = 100

    def emptyTank(self):
        """Drain the tank completely."""
        self.gas = 0

    def gasLeft(self):
        """Return the current fuel level."""
        return self.gas
class Car(Vehicle):
def __init__(self, price, gas, color, speed):
super().__init__(price, gas, color)
self.speed = speed
def horn(self):
print('Beep! Beep!')
class Truck(Vehicle):
def __init__(self, price, gas, color, tires):
super().__init__(price, gas, color)
self.tires = tires
def horn(self):
print('Honk! Honk!')
# This inherits from the Vehicle and Car classes
# Vehicle > Car > Motorcycle
class Motorcycle(Car):
def __init__(self, price, gas, color, speed, cc):
super().__init__(price, gas, color, speed)
self. cc = cc
##################
##################
##################
class Point():
    """2-D point supporting arithmetic, dot product and length comparisons."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y
        # NOTE(review): captured once here; move() does not refresh it,
        # so coords can go stale.
        self.coords = (self.x, self.y)

    def move(self, x, y):
        """Translate the point in place by (x, y)."""
        self.x += x
        self.y += y

    def __add__(self, p):
        """Component-wise sum, returned as a new Point."""
        return Point(p.x + self.x, p.y + self.y)

    def __sub__(self, p):
        """Component-wise difference (self - p) as a new Point."""
        return Point(self.x - p.x, self.y - p.y)

    def __mul__(self, p):
        """Dot product of the two points treated as vectors."""
        return self.x * p.x + self.y * p.y

    def length(self):
        """Distance from the origin."""
        import math
        return math.sqrt(self.x**2 + self.y**2)

    # Ordering comparisons rank points by distance from the origin.
    def __gt__(self, p):
        return self.length() > p.length()

    def __ge__(self, p):
        return self.length() >= p.length()

    def __lt__(self, p):
        return self.length() < p.length()

    def __le__(self, p):
        return self.length() <= p.length()

    def __eq__(self, p):
        """Equality compares coordinates, not length."""
        return self.x == p.x and self.y == p.y

    def __str__(self):
        return "({},{})".format(self.x, self.y)
# Demo points for the operator overloads defined above.
p1 = Point(3, 4)
p2 = Point(3, 2)
p3 = Point(1, 3)
p4 = Point(0, 1)
# Addition
p5 = p1 + p2
# Subtraction
p6 = p4 - p1
# Multiplication (dot product -> plain number)
p7 = p5 * p6
'''
print(f"{p5}, {p6}, {p7}")
print(p1 == p2)
print(p1 > p3)
print(p4 <= p6)
'''
|
""" +=========================================================================================+
|| Lab04: A* Search ||
|| Name: Rashid Lasker Date: 9/18/14 ||
+=========================================================================================+
This program generates a word ladder between two words and prints it out.
"""
#######################################<BEGINNING OF PROGRAM>#######################################
def getNeighborDictFromFile():
    """Load the word -> one-letter-change-neighbors dict from its pickle file.

    Fix: the file was opened without a context manager, so the handle leaked
    if pickle.load() raised; 'with' guarantees it is closed.
    """
    import pickle
    #fileName = '/afs/csl.tjhsst.edu/students/2016/2016rlasker/Documents/oneChangeDict.txt'
    fileName = 'oneChangeDict.txt'
    with open(fileName, 'rb') as file1:
        return pickle.load(file1)
#---------------------------------------------------------------------------------------------------
def checkWord(word, dictionary):
    """Return *word* if it exists in *dictionary*; abort the program otherwise."""
    if word not in dictionary:
        exit('Word is not in dictionary')
    return word
#---------------------------------------------------------------------------------------------------
def h(word, finalWord):
    """A* heuristic: number of positions where word differs from finalWord.

    Generalized from the original hard-coded range(6) (which assumed
    6-letter words and raised IndexError otherwise) to words of any length.
    Admissible, since each ladder step changes exactly one letter.
    """
    return sum(a != b for a, b in zip(word, finalWord))
#---------------------------------------------------------------------------------------------------
def findWordLadder(initialWord, finalWord, dictionary):
    """A* search for a ladder of one-letter changes from initialWord to finalWord.

    dictionary maps each word to its neighbor list.  Queue entries are
    (f, word, path, g) tuples, so list.sort() orders the open list by
    f = g + h.  Prints the ladder and search statistics.
    """
    queue = [(0 + h(initialWord, finalWord), initialWord, [], 0)]
    CLOSED = dict()  # word -> g value at which it was expanded
    popCount = 0
    maxQueueLength = 0
    while queue:
        popCount += 1
        # Pop the lowest-f node (the queue is kept sorted below).
        parentNode = queue.pop(0)
        parentName = parentNode[1]
        parentPath = parentNode[2]
        parentG = parentNode[3]
        currentPath = []
        currentPath += parentPath
        currentPath.append(parentName)
        currentG = parentG + 1
        if parentName == finalWord:
            print(currentPath)
            print('Length = ' + str(len(currentPath)))
            break
        CLOSED[parentName] = parentG
        childrenList = dictionary.get(parentName)
        # Expand children into the open queue / reopen closed nodes.
        step5(queue, CLOSED, finalWord, childrenList, currentG, currentPath)
        if len(queue) > maxQueueLength:
            maxQueueLength = len(queue)
        # Re-sort so the head of the list is always the best f value.
        queue.sort()
    print('Longest queue = ' + str(maxQueueLength))
    print('Pops = ' + str(popCount))
#---------------------------------------------------------------------------------------------------
def step5(queue, CLOSED, finalWord, childrenList, currentG, currentPath):
    """A* child expansion: insert/reopen/replace children in the open queue.

    For each child: skip if CLOSED holds an equal-or-better g; reopen if
    CLOSED's g is worse; append if not already queued; replace the queued
    entry when this path reaches it with a lower g.
    """
    for currentChild in childrenList:
        newChild = (currentG + h(currentChild, finalWord), currentChild, currentPath, currentG)
        # Linear scan for an existing open entry for this word.
        isAlreadyInOpen = False
        index = 0
        for n in range(len(queue)):
            if queue[n][1] == currentChild:
                isAlreadyInOpen = True
                index = n
                break
        if currentChild in CLOSED:
            if CLOSED[currentChild] <= currentG:
                continue
            elif CLOSED[currentChild] > (currentG):
                # Reopen: the new path is strictly better.
                del CLOSED[currentChild]
                queue.append(newChild)
        elif not isAlreadyInOpen:
            queue.append(newChild)
        elif isAlreadyInOpen:
            # NOTE(review): relies on `n` still holding the matched index
            # from the scan above (same value as `index`) -- fragile.
            if queue[n][3] > (currentG):
                queue.remove(queue[n])
                queue.append(newChild)
#-----------------------------------------------<Main>----------------------------------------------
def main():
    """Build the neighbor dictionary and print a ladder between two anagrams."""
    words = ('silver', 'sliver')
    neighborDict = getNeighborDictFromFile()
    for word in words:
        checkWord(word, neighborDict)
    findWordLadder(words[0], words[1], neighborDict)
#===============================<GLOBAL CONSTANTS and GLOBAL IMPORTS================================
from random import random, randint
from math import sqrt
from copy import deepcopy
# Fix: time.clock() was deprecated in 3.3 and removed in Python 3.8;
# perf_counter() is the documented replacement for wall-clock timing.
from time import perf_counter

START_TIME = perf_counter()
main()
print('\n +===<RUN TIME>===+')
print(' | %5.2f' % (perf_counter() - START_TIME), 'seconds |')
print(' +================+')
##########################################<END OF PROGRAM>##########################################
|
# P4 Trigger script that triggers buildkite builds
# Usage:
#   my-pipeline change-commit //depot/... "python %//depot/scripts/buildkite-trigger.py% <pipeline> %changelist% %user%"
import sys
import subprocess
try:
    from urllib.request import urlopen, Request
except ImportError:
    from urllib2 import urlopen, Request
import json

__BUILDKITE_TOKEN__ = "<your_token>"
__ORG_SLUG__ = "<your_org>"

# Positional trigger arguments supplied by the Perforce trigger spec.
pipeline_slug = sys.argv[1]
changelist = sys.argv[2]
user = sys.argv[3]

# Fix: check_output() returns bytes on Python 3; decode so json.dumps(payload)
# below does not raise "Object of type bytes is not JSON serializable".
description = subprocess.check_output(
    ["p4", "-Ztag", "-F", "%desc%", "describe", changelist]).decode("utf-8")

headers = {
    'Content-Type': 'application/json',
    'Authorization': 'Bearer %s' % __BUILDKITE_TOKEN__
}
payload = {
    'commit': '@' + changelist,
    'branch': 'master',
    'message': description,
    'author': {
        'name': user
    }
}
url = "https://api.buildkite.com/v2/organizations/%s/pipelines/%s/builds" % (__ORG_SLUG__, pipeline_slug)
params = json.dumps(payload).encode('utf8')
req = Request(url, data=params, headers=headers)
response = urlopen(req)
# print(response.read())
|
class Vec3:
    """3-component vector with arithmetic, dot/cross products and normalization."""

    def __init__(self, *args):
        if len(args) == 3:
            self.x, self.y, self.z = args[0], args[1], args[2]
        elif len(args) == 0:
            self.x, self.y, self.z = 0, 0, 0
        else:
            # Fix: was a bare `raise`, which outside an except block only
            # produces "RuntimeError: No active exception to re-raise".
            raise TypeError("Vec3() takes 0 or 3 arguments")

    def __add__(self, v):
        if isinstance(v, Vec3):
            return Vec3(self.x+v.x, self.y+v.y, self.z+v.z)
        raise TypeError("Vec3 can only be added to a Vec3")

    def __sub__(self, v):
        if isinstance(v, Vec3):
            return Vec3(self.x-v.x, self.y-v.y, self.z-v.z)
        raise TypeError("Vec3 can only be subtracted from a Vec3")

    def __mul__(self, v):
        # Scalar multiplication only; use dot()/cross() for vector products.
        if isinstance(v, (float, int)):
            return Vec3(self.x*v, self.y*v, self.z*v)
        raise TypeError("Vec3 can only be scaled by a number")

    def __repr__(self):
        return "Vec3(%.2f, %.2f, %.2f)" % (self.x, self.y, self.z)

    def dot(self, v):
        """Dot product with v."""
        return self.x*v.x + self.y*v.y + self.z*v.z

    def cross(self, v):
        """Right-handed cross product with v."""
        x = self.y*v.z - self.z*v.y
        y = self.z*v.x - self.x*v.z
        z = self.x*v.y - self.y*v.x
        return Vec3(x, y, z)

    def length(self):
        """Euclidean norm (sqrt is a Processing builtin in this sketch)."""
        return sqrt(self.dot(self))

    def normalize(self):
        """Return a unit-length copy; undefined for the zero vector."""
        l = self.length()
        return Vec3(self.x/l, self.y/l, self.z/l)
class Vec4:
    """Homogeneous 4-component vector; requires exactly four components."""

    def __init__(self, *args):
        if len(args) != 4:
            raise
        self.x, self.y, self.z, self.w = args
class Mat4:
def __init__(self, *args):
if len(args) == 1:
self.copy(args[0])
elif len(args) == 3:
if isinstance(args[0], Vec3):
X, Y, Z = args[0], args[1], args[2]
self.m = [[X.x, Y.x, Z.x, 0],
[X.y, Y.y, Z.y, 0],
[X.z, Y.z, Z.z, 0],
[0, 0, 0, 1]]
elif len(args) == 4:
if isinstance(args[0], Vec3):
X, Y, Z, O = args[0], args[1], args[2], args[3]
self.m = [[X.x, Y.x, Z.x, O.x],
[X.y, Y.y, Z.y, O.y],
[X.z, Y.z, Z.z, O.z],
[0, 0, 0, 1]]
else:
self.zero()
def copy(self, m):
for i in range(4):
for j in range(4):
self.m[i][j] = m.m[i][j]
return self
def zero(self):
self.m = [[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]]
return self
def identity(self):
self.m = [[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]]
return self
def scale(self, *args):
if len(args) == 1:
sx, sy, sz = args[0], args[0], args[0]
elif len(args) == 3:
sx, sy, sz = args[0], args[1], args[2]
else:
raise
self.m = [[sx, 0, 0, 0],
[0, sy, 0, 0],
[0, 0, sz, 0],
[0, 0, 0, 1]]
return self
def translate(self, *args):
if len(args) == 3:
tx, ty, tz = args[0], args[1], args[2]
self.m = [[1, 0, 0, tx],
[0, 1, 0, ty],
[0, 0, 1, tz],
[0, 0, 0, 1]]
return self
def ndc2scr(self):
global width
global height
hw = width / 2.0
hh = height / 2.0
n = 1.0
f = 1000.0
self.m = [[hw, 0, 0, hw],
[0, hh, 0, hh],
[0, 0, (f-n)/2, (f-n)/2],
[0, 0, 0, 1]]
return self
def ortho(self, left, right, bottom, top, near, far):
sx = 2 / (right - left)
sy = 2 / (top - bottom)
sz = -2 / (far - near)
tx = (right + left) / (right - left)
ty = (top + bottom) / (top - bottom)
tz = (far + near) / (far - near)
self.m = [[sx, 0, 0, tx],
[0, sy, 0, ty],
[0, 0, sz, tz],
[0, 0, 0, 1]]
return self
def perspective(self, fovy, aspect, znear, zfar):
f = 1 / tan(fovy/2)
a = aspect
z1 = (zfar+znear) / (znear-zfar)
z2 = (2*zfar*znear) / (znear-zfar)
self.m = [[f/a, 0, 0, 0],
[0, f, 0, 0],
[0, 0, z1, z2],
[0, 0, -1, 0]]
return self
def lookAt(self, eye, center, up):
f = center - eye
f = f.normalize()
s = f.cross(up)
s = s.normalize()
u = s.cross(f)
self.m = [[s.x, s.y, s.z, 0],
[u.x, u.y, u.z, 0],
[-f.x, -f.y, -f.z, 0],
[0, 0, 0, 1]]
T = Mat4().translate(-eye.x, -eye.y, -eye.z)
return T * self
def __mul__(self, p):
if isinstance(p, int) or isinstance(p, float):
m = Mat4(self)
for i in range(4):
for j in range(4):
m.m[i][j] *= p
return m
elif isinstance(p, Vec3):
v = Vec4(p.x, p.y, p.z, 1)
v = self.__mul__(v)
return Vec3(v.x, v.y, v.z)
elif isinstance(p, Vec4):
x = self.m[0][0]*p.x + self.m[0][1]*p.y + self.m[0][2]*p.z + p.w*self.m[0][3]
y = self.m[1][0]*p.x + self.m[1][1]*p.y + self.m[1][2]*p.z + p.w*self.m[1][3]
z = self.m[2][0]*p.x + self.m[2][1]*p.y + self.m[2][2]*p.z + p.w*self.m[2][3]
w = self.m[3][0]*p.x + self.m[3][1]*p.y + self.m[3][2]*p.z + p.w*self.m[3][3]
if w == 0:
x, y, z, w = float("inf"), float("inf"), float("inf"), 1
return Vec4(x/w, y/w, z/w, 1)
elif isinstance(p, Mat4):
m = Mat4()
for i in range(4):
for j in range(4):
for k in range(4):
m.m[i][j] += self.m[i][k]*p.m[k][j]
return m
else:
raise
def __str__(self):
return """Mat4[
%.2f %.2f %.2f %.2f
%.2f %.2f %.2f %.2f
%.2f %.2f %.2f %.2f
%.2f %.2f %.2f %.2f]""" % (self.m[0][0], self.m[0][1], self.m[0][2], self.m[0][3],
self.m[1][0], self.m[1][1], self.m[1][2], self.m[1][3],
self.m[2][0], self.m[2][1], self.m[2][2], self.m[2][3],
self.m[3][0], self.m[3][1], self.m[3][2], self.m[3][3])
class Quat:
    """Rotation quaternion: vector part (x, y, z) and scalar part w."""

    def __init__(self, *args):
        if len(args) == 0:
            # Identity rotation.
            self.x, self.y, self.z, self.w = 0, 0, 0, 1
        elif len(args) == 1:
            other = args[0]
            self.x, self.y, self.z, self.w = other.x, other.y, other.z, other.w
        elif len(args) == 4:
            self.x, self.y, self.z, self.w = args
        else:
            raise

    def mat4(self):
        """Convert to the equivalent 4x4 rotation matrix."""
        qx, qy, qz, qw = self.x, self.y, self.z, self.w
        x2, y2, z2 = qx + qx, qy + qy, qz + qz
        xx, xy, xz = qx * x2, qx * y2, qx * z2
        yy, yz, zz = qy * y2, qy * z2, qz * z2
        wx, wy, wz = qw * x2, qw * y2, qw * z2
        m = Mat4()
        m.m = [[1.0 - (yy + zz), xy - wz, xz + wy, 0],
               [xy + wz, 1.0 - (xx + zz), yz - wx, 0],
               [xz - wy, yz + wx, 1.0 - (xx + yy), 0],
               [0, 0, 0, 1]]
        return m

    def inverse(self):
        """Conjugate; equals the inverse for unit quaternions."""
        return Quat(-self.x, -self.y, -self.z, self.w)

    def length(self):
        """Euclidean norm of the four components."""
        sq = self.x*self.x + self.y*self.y + self.z*self.z + self.w*self.w
        return sqrt(sq)

    def __mul__(self, p):
        # Only rotation of a Vec3 is supported.
        if isinstance(p, Vec3):
            return self.mat4() * p
        raise

    def rotate(self, a, t):
        """Set self to a rotation of angle t about axis a; return self."""
        half_sin = sin(t/2)
        self.x = a.x*half_sin
        self.y = a.y*half_sin
        self.z = a.z*half_sin
        self.w = cos(t/2)
        return self
def point3(x, y, z, col):
    """Plot one screen-space point with a z-buffer hidden-surface test.

    Smaller z wins: the pixel is drawn only when z is nearer than the value
    currently stored for (x, y).  Uses Processing's stroke()/point() and the
    global width/height.
    """
    global zbuffer
    if not (0 <= x and x < width):
        return
    if not (0 <= y and y < height):
        return
    idx = int(y*width + x)
    # Reject if off the buffer or hidden behind an earlier, nearer point.
    if idx >= len(zbuffer) or zbuffer[idx] < z:
        return
    zbuffer[idx] = z
    stroke(col)
    strokeWeight(5)
    point(x, y)
def line3(p0, p1, col):
    """Draw a 3-D line via an integer Bresenham walk, depth-tested per point.

    NOTE(review): once the deltas are taken, x1/y1/z1 are reused as the
    Bresenham error accumulators (seeded to dm/2) -- they no longer hold
    the endpoint coordinates.
    """
    x0, y0, z0 = p0.x, p0.y, p0.z
    x1, y1, z1 = p1.x, p1.y, p1.z
    dx = abs(x1-x0)
    dy = abs(y1-y0)
    dz = abs(z1-z0)
    # Step direction per axis.
    sx = 1 if x0 < x1 else -1
    sy = 1 if y0 < y1 else -1
    sz = 1 if z0 < z1 else -1
    # Drive the walk along the dominant axis.
    dm = max(dx, dy, dz)
    i = dm
    x1 = y1 = z1 = dm/2  # error accumulators from here on
    while True:
        point3(x0, y0, z0, col)
        i -= 1
        if i <= 0:
            break
        x1 -= dx
        if x1 < 0:
            x1 += dm
            x0 += sx
        y1 -= dy
        if y1 < 0:
            y1 += dm
            y0 += sy
        z1 -= dz
        if z1 < 0:
            z1 += dm
            z0 += sz
# --- Sketch state (Processing globals; radians() is a Processing builtin) ---
zbuffer = []          # per-pixel depth buffer, rebuilt each frame in draw()
shape = 0             # current shape index: 0=sphere, 1=cylinder, 2=cone
proj = 0              # 0 = perspective camera, 1 = orthographic
dim = 2.0             # scene half-extent used for camera distance / frustum
modelview = Mat4()
projection = Mat4()
aspect = 0.0          # set in setup() from the window size
fov = radians(55)     # vertical field of view
ph, th = 0.0, 0.0     # camera elevation / azimuth angles
steps = radians(5.0)  # tessellation step for the parametric shapes
def setup():
    """Processing entry point: open the window and compute the aspect ratio."""
    global plane  # declared but never assigned here -- TODO confirm needed
    global zbuffer
    global proj
    global aspect
    size(1280, 800)
    aspect = width*1.0 / height
def eyesight():
    """Rebuild the global modelview matrix from the camera angles ph/th.

    proj == 0: orbit camera around the origin at radius 2*dim via lookAt.
    otherwise: composes two axis rotations as quaternions.
    NOTE(review): Quat.__mul__ only accepts a Vec3, so `Y * X` in the
    else-branch would raise; nothing visible here ever changes proj away
    from 0/1 camera mode -- confirm before enabling that branch.
    """
    global modelview
    global ph
    global th
    if proj == 0:
        st = sin(th)
        ct = cos(th)
        sp = sin(ph)
        cp = cos(ph)
        X = -2*dim * st * cp
        Y = 2*dim * sp
        Z = 2*dim * ct * cp
        eye = Vec3(X, Y, Z)
        center = Vec3()
        # up flips with cos(elevation) so the camera can pass the poles.
        up = Vec3(0, cp, 0)
        modelview = Mat4().lookAt(eye, center, up)
    else:
        X = Quat().rotate(Vec3(1, 0, 0), ph)
        Y = Quat().rotate(Vec3(0, 1, 0), th)
        modelview = Y * X
def project():
    """Rebuild the global projection matrix for the current mode and zoom."""
    global projection
    global proj
    global dim
    if proj == 0:
        projection = Mat4().perspective(fov, aspect, dim/4, 4*dim)
    elif proj == 1:
        projection = Mat4().ortho(-dim*aspect, dim*aspect, -dim, dim, -dim, dim)
def draw():
    """Processing frame loop: clear the depth buffer, set matrices, draw shape."""
    global zbuffer
    global shape
    # Reset every pixel to "infinitely far" before painting the frame.
    zbuffer = [float("inf")]*(width*height)
    background(100)
    eyesight()
    project()
    drawShape(shape)
def drawAxis():
    """Draw RGB axis lines (x red, y green, z blue) at an offset origin."""
    global modelview
    global projection
    T = Mat4().translate(-2.5, -1, -1)
    # Full transform: model -> view -> clip -> screen.
    M = Mat4().ndc2scr() * projection * T * modelview
    a = M * Vec3()
    b = M * Vec3(1, 0, 0)
    line3(a, b, color(255, 0, 0))
    b = M * Vec3(0, 1, 0)
    line3(a, b, color(0, 255, 0))
    b = M * Vec3(0, 0, 1)
    line3(a, b, color(0, 0, 255))
def drawShape(shape):
    """Dispatch to the renderer for the given shape index (unknown -> no-op)."""
    renderers = {0: drawSphere, 1: drawCylinder, 2: drawCone}
    renderer = renderers.get(shape)
    if renderer is not None:
        renderer()
def drawPoly(verts):
    """Project verts through the current matrices and outline the polygon."""
    M = Mat4().ndc2scr() * projection * modelview
    for i in range(len(verts)):
        j = (i + 1) % len(verts)  # wrap around to close the polygon
        a = M * verts[i]
        b = M * verts[j]
        line3(a, b, color(23, 45, 50))
def drawSphere():
    """Tessellate the unit sphere into quads and draw them as outlines.

    Walks latitude (ph2) x longitude (th2), keeping a sliding window of
    the last four vertices and drawing a quad once the window is full.
    """
    global steps
    verts = []
    ph2 = radians(-90)
    while ph2 < radians(90):
        th2 = 0
        while th2 <= radians(360):
            v = spherical(th2, ph2)
            verts.append(v)
            v = spherical(th2, ph2+steps)
            verts.append(v)
            if len(verts) == 4:
                drawPoly(verts)
            if len(verts) >= 4:
                # Slide the window: drop the oldest vertex pair.
                verts.pop(0)
                verts.pop(0)
            th2 += 2*steps
        ph2 += steps
def drawCone():
    """Draw a cone: a triangle fan to the apex plus a rotated base cap.

    The cap triangles are generated in the XZ plane and rotated 90 degrees
    about the X axis by a quaternion.
    """
    global steps
    i = 0
    verts = []
    while i <= radians(360):
        # Side triangle: apex plus two adjacent rim points.
        v = Vec3(0, 0, 1)
        verts.append(v)
        v = Vec3(cos(i), sin(i), 0)
        verts.append(v)
        v = Vec3(cos(i+steps), sin(i+steps), 0)
        verts.append(v)
        drawPoly(verts)
        verts = []
        i += steps
    R = Quat().rotate(Vec3(1, 0, 0), radians(90))
    i = 0
    while i <= radians(360):
        # Base cap triangle: center plus two adjacent rim points, rotated.
        v = R * Vec3(0, 0, 0)
        verts.append(v)
        v = R * Vec3(cos(i), 0, sin(i))
        verts.append(v)
        v = R * Vec3(cos(i+steps), 0, sin(i+steps))
        verts.append(v)
        drawPoly(verts)
        verts = []
        i += steps
def drawCylinder():
    """Draw a cylinder: quad-strip side walls plus top and bottom caps."""
    global steps
    verts = []
    i = 0
    while i <= radians(360):
        # Side wall: pairs of top/bottom rim vertices form a quad strip.
        v = Vec3(cos(i), 1, sin(i))
        verts.append(v)
        v = Vec3(cos(i), -1, sin(i))
        verts.append(v)
        if len(verts) == 4:
            drawPoly(verts)
        if len(verts) >= 4:
            verts.pop(0)
            verts.pop(0)
        i += steps
    # Caps at y = +1 and y = -1, drawn as triangle fans from the center.
    i = 1
    while i >= -1:
        verts = []
        v = Vec3(0, i, 0)
        verts.append(v)
        j = 0
        while j <= radians(360):
            # NOTE(review): x is scaled by i (+-1) but z is not
            # (i*cos(j) vs plain sin(j)) -- looks asymmetric; confirm
            # whether i*sin(j) was intended.
            v = Vec3(i*cos(j), i, sin(j))
            verts.append(v)
            if len(verts) == 3:
                drawPoly(verts)
                verts.pop()
                verts.pop()
            j += steps
        i -= 2
def spherical(th2, ph2):
    """Spherical -> Cartesian on the unit sphere (th2 longitude, ph2 latitude)."""
    cp = cos(ph2)
    return Vec3(sin(th2) * cp, cos(th2) * cp, sin(ph2))
def keyPressed():
    """Processing key handler: camera angles, fov, zoom, shape and tessellation.

    Arrows rotate the camera; 1/2 change fov; a/s zoom; z/x cycle the
    shape; 3/4 change the tessellation step.
    """
    global th
    global ph
    global dim
    global fov
    global shape
    global steps
    if keyCode == RIGHT:
        th += radians(5)
    elif keyCode == LEFT:
        th -= radians(5)
    elif keyCode == UP:
        ph += radians(5)
    elif keyCode == DOWN:
        ph -= radians(5)
    elif key == '1':
        fov -= radians(1)
    elif key == '2':
        fov += radians(1)
    elif key == 'a':
        dim += 0.1
    elif key == 's':
        dim -= 0.1
    elif key == 'z':
        shape = (shape - 1) % 3
    elif key == 'x':
        shape = (shape + 1) % 3
    elif key == '3':
        steps -= radians(5)
    elif key == '4':
        steps += radians(5)
    if steps <= 0.0:
        # NOTE(review): clamps to 1 *radian* (~57 deg), not radians(5) like
        # the step size used elsewhere -- confirm intended.
        steps = 1
|
# Generated by Django 2.2 on 2020-07-28 14:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow shop.Category.description to be left blank in forms/admin."""

    dependencies = [
        ('shop', '0002_auto_20200728_1753'),
    ]

    operations = [
        # blank=True is Django validation-level only -- presumably no real
        # schema change for a TextField; confirm against the backend.
        migrations.AlterField(
            model_name='category',
            name='description',
            field=models.TextField(blank=True),
        ),
    ]
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os, sys
import pprint
from shutil import copy2
import xml.etree.ElementTree as ET
# Namespace prefix ElementTree prepends to every tag in the CMS DDL schema.
TAG_PREFIX='{http://www.cern.ch/cms/DDL}'
# Process exit codes.
CMSSW_NOT_SET=1
TRACKER_MATERIAL_FILE_MISSING=2
# Boilerplate wrapped around the generated ListGroups C++ header.
HEADER = """
#ifndef SIMTRACKER_TRACKERMATERIALANALYSIS_LISTGROUPS_MATERIALDIFFERENCE_H
#define SIMTRACKER_TRACKERMATERIALANALYSIS_LISTGROUPS_MATERIALDIFFERENCE_H
void ListGroups::fillMaterialDifferences() {
"""
TRAILER = """
}
#endif // SIMTRACKER_TRACKERMATERIALANALYSIS_LISTGROUPS_MATERIALDIFFERENCE_H
"""
def checkEnvironment():
    """Exit with CMSSW_NOT_SET unless a CMSSW environment is configured.

    Presence of CMSSW_RELEASE_BASE in the environment is used as the probe.
    (Idiom fix: `'X' not in os.environ` instead of `not 'X' in d.keys()`.)
    """
    if 'CMSSW_RELEASE_BASE' not in os.environ:
        print('CMSSW Environments not setup, quitting\n')
        sys.exit(CMSSW_NOT_SET)
def getTrackerRecoMaterialCopy(filename):
    """Copy the PhaseI trackerRecoMaterial.xml into *filename*.

    Prefers the user's checkout ($CMSSW_BASE) and falls back to the release
    area ($CMSSW_RELEASE_BASE); exits with TRACKER_MATERIAL_FILE_MISSING
    when neither copy exists.
    """
    tracker_reco_material = os.path.join(os.environ['CMSSW_BASE'],
                                         'src/Geometry/TrackerRecoData/data/PhaseI/v1/trackerRecoMaterial.xml')
    if not os.path.exists(tracker_reco_material):
        tracker_reco_material = os.path.join(os.environ['CMSSW_RELEASE_BASE'],
                                             'src/Geometry/TrackerRecoData/data/PhaseI/v1/trackerRecoMaterial.xml')
    if not os.path.exists(tracker_reco_material):
        print('Something is wrong with the CMSSW installation. The file %s is missing. Quitting.\n' % tracker_reco_material)
        sys.exit(TRACKER_MATERIAL_FILE_MISSING)
    copy2(tracker_reco_material, filename)
def produceXMLFromParameterFile():
    """
    Starting from the file parameters.xml produced by the
    TrackingMaterialAnalyser via cmsRun, it writes out a new XML,
    taking into account the proper names and grouping of detectors
    together.
    The skeleton of the XML is taken directly from the release the
    user is currently using, i.e. from
    $CMSSW_RELEASE_BASE/src/Geometry/TrackerRecoData/data/{,PhaseI/pixfwd}trackerRecoMaterial.xml.
    A new file, named trackerRecoMaterial.xml, is saved in the
    current directory.
    """
    tracker_reco_material = './trackerRecoMaterialFromRelease.xml'
    tracker_reco_material_updated = './parameters.xml'
    ET.register_namespace('', "http://www.cern.ch/cms/DDL")
    tree = ET.parse(tracker_reco_material)
    root = tree.getroot()
    tree_updated = ET.parse(tracker_reco_material_updated)
    root_updated = tree_updated.getroot()
    # Fix: Element.getchildren() was removed in Python 3.9; list(root)
    # is the documented replacement.
    sections = list(root)
    for child in sections[0]:
        print(child.attrib['name'])
    for spec_par in root.iter('%sSpecPar' % TAG_PREFIX):
        current_detector = spec_par.attrib['name']
        for parameter in spec_par.iter('%sParameter' % TAG_PREFIX):
            print(current_detector, parameter.attrib['name'], parameter.attrib['value'])
            updated_current_detector_node = root_updated.find(".//Group[@name='%s']" % current_detector)
            # Fix: an Element with no children is falsy, so a plain truth
            # test wrongly treats a found-but-empty group as missing;
            # compare against None instead.
            if updated_current_detector_node is not None:
                for child in updated_current_detector_node:
                    if child.attrib['name'] == parameter.attrib['name']:
                        parameter.set('name', child.attrib['name'])
                        parameter.set('value', child.attrib['value'])
                        print(current_detector, parameter.attrib['name'], parameter.attrib['value'])
            else:
                print("Missing group: %s" % current_detector)
    tree.write('trackerRecoMaterial.xml', encoding='UTF-8', xml_declaration=True)
def compareNewXMLWithOld(format_for_twiki):
    """
    Computes the difference between the old values, stored in the
    central repository for the current release, i.e. from
    $CMSSW_RELEASE_BASE/src/Geometry/TrackerRecoData/data/{,PhaseI/pixfwd}trackerRecoMaterial.xml,
    and the new values that we assume are present in the same file
    under the locally installed release, i.e. under
    $CMSSW_BASE/src/Geometry/TrackerRecoData/data/{,PhaseI/pixfwd}trackerRecoMaterial.xml. No
    check is performed to guarantee that the files are already
    there. If the file is not there, it is searched in the current
    folder. A missing file will result in an exception.

    The output of this function is a formatted structured as:
    ComponentsName KindOfParameter OldValue NewValue Difference
    where the Difference is computed as (NewValue-OldValue)/OldValue.

    Results are flushed at the terminal, nothing is saved.
    """
    tracker_reco_material = './trackerRecoMaterialFromRelease.xml'
    tracker_reco_material_updated = os.path.join(os.environ['CMSSW_BASE'],
                                                 'src/SimTracker/TrackerMaterialAnalysis/test/trackerRecoMaterial.xml')
    if not os.path.exists(tracker_reco_material_updated):
        tracker_reco_material_updated = './trackerRecoMaterial.xml'
        if not os.path.exists(tracker_reco_material_updated):
            raise os.error('Missing trackerRecoMaterial.xml file.')
    ET.register_namespace('', "http://www.cern.ch/cms/DDL")
    tree = ET.parse(tracker_reco_material)
    root = tree.getroot()
    tree_updated = ET.parse(tracker_reco_material_updated)
    root_updated = tree_updated.getroot()
    differences = {}
    ordered_keys = []
    for spec_par in root.iter('%sSpecPar' % TAG_PREFIX):
        current_detector = spec_par.attrib['name']
        ordered_keys.append(current_detector)
        for parameter in spec_par.iter('%sParameter' % TAG_PREFIX):
            updated_current_detector_node = root_updated.find(".//%sSpecPar[@name='%s']" % (TAG_PREFIX, current_detector))
            # Explicit None test: a childless Element is falsy, so a bare
            # truth test would wrongly skip it.
            if updated_current_detector_node is not None:
                for child in updated_current_detector_node:
                    name = child.get('name', None)
                    if name and name == parameter.attrib['name']:
                        # [old value, new value, relative difference in %]
                        differences.setdefault(current_detector, {}).setdefault(name, [float(parameter.attrib['value']),
                                                                                       float(child.attrib['value']),
                                                                                       ((float(child.attrib['value']) - float(parameter.attrib['value']))
                                                                                        / float(parameter.attrib['value']) * 100.)]
                                                                                )
            else:
                print('Element not found: %s' % current_detector)
    # Context manager guarantees the header is flushed and closed; the
    # original called `header.close` without parentheses and never closed it.
    with open(os.path.join(os.environ['CMSSW_BASE'],
                           'src/SimTracker/TrackerMaterialAnalysis/plugins/ListGroupsMaterialDifference.h'), 'w') as header:
        header.write(HEADER)
        # NOTE(review): a group missing either TrackerRadLength or TrackerXi
        # raises KeyError here, as in the original code.
        for group in differences.keys():
            header.write('  m_diff["%s"] = std::make_pair<float, float>(%f, %f);\n' % (group,
                                                                                       differences[group]['TrackerRadLength'][2],
                                                                                       differences[group]['TrackerXi'][2]))
        for group in differences.keys():
            header.write('  m_values["%s"] = std::make_pair<float, float>(%f, %f);\n' % (group,
                                                                                         differences[group]['TrackerRadLength'][1],
                                                                                         differences[group]['TrackerXi'][1]))
        # pprint.pprint(differences)
        for key in ordered_keys:
            if format_for_twiki:
                print("| %s | %f | %f | %f%% | %f | %f | %f%% |" % (key,
                                                                    differences[key]['TrackerRadLength'][0],
                                                                    differences[key]['TrackerRadLength'][1],
                                                                    differences[key]['TrackerRadLength'][2],
                                                                    differences[key]['TrackerXi'][0],
                                                                    differences[key]['TrackerXi'][1],
                                                                    differences[key]['TrackerXi'][2]
                                                                    ))
            else:
                print("%s %f %f %f%% %f %f %f%%" % (key,
                                                    differences[key]['TrackerRadLength'][0],
                                                    differences[key]['TrackerRadLength'][1],
                                                    differences[key]['TrackerRadLength'][2],
                                                    differences[key]['TrackerXi'][0],
                                                    differences[key]['TrackerXi'][1],
                                                    differences[key]['TrackerXi'][2]
                                                    ))
        header.write(TRAILER)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Easily manipulate and inspect XML files related to Tracking Material.')
    parser.add_argument('-p', '--produce', action='store_true',
                        default=False,
                        help='Produce a trackerRecoMaterial.xml starting from the parameters.xml file produced by the trackingMaterialProducer.')
    parser.add_argument('-c', '--compare', action='store_true',
                        default=False,
                        help='Compares a local trackerRecoMaterial.xml against the one bundled with the release.')
    parser.add_argument('-w', '--twiki', action='store_true',
                        default=False,
                        help="""Compares a local trackerRecoMaterial.xml against the one bundled
                        with the release and produces and output that is Twiki compatible
                        to be put into a table.""")
    args = parser.parse_args()
    # Environment sanity check and a local copy of the release XML are
    # required by both the produce and compare paths.
    checkEnvironment()
    getTrackerRecoMaterialCopy('trackerRecoMaterialFromRelease.xml')
    if args.produce:
        produceXMLFromParameterFile()
    if args.compare or args.twiki:
        compareNewXMLWithOld(args.twiki)
|
from bottle import run, get, view, post, request
import json
import jwt
import requests
##############################
@get("/company")
@view("index_company.html")
def do():
    """Render the company landing page with a fixed company name."""
    context = {"company_name": "SUPER"}
    return context
@get("/company-token")
@view("index_company_token.html")
def do():
    """Render the company token page."""
    return {"company_name": "Token stuff"}
@post("/get-name-by-cpr")
def do():
    """Return the contents of the per-CPR text file named in the JSON body."""
    # Connect to db
    # Execute a SQL/Document query
    data_from_client = json.load(request.body)
    print("cpr", data_from_client)
    cpr = data_from_client['cpr']
    # SECURITY: the client-supplied CPR is interpolated into a file path.
    # Reject anything that is not purely alphanumeric to block path
    # traversal (e.g. cpr = "../../etc/passwd").
    if not cpr.isalnum():
        return ""
    file_name = "./data/" + cpr + ".txt"  # In python you go from the root
    # Context manager closes the file; the original leaked the handle.
    with open(file_name, "r") as opened_file:
        return opened_file.read()
@post("/process-jwt-token")
def do():
    """Decode the JWT from the JSON body; alert via SMS on any failure.

    Returns the string form of the decoded claims (or "" when decoding
    never succeeded).
    """
    result = ""
    try:
        token = json.load(request.body)["jwt"]
        try:
            result = jwt.decode(
                token, "jwt-secret-key", algorithms=["HS256"])
            # Only look for the email claim when decoding actually
            # succeeded; the original indexed the empty-string fallback and
            # sent a misleading second "Email missing" SMS after every
            # decode failure.
            try:
                email = result["email"]
            except Exception:
                send_sms("Email missing")
        except Exception as jwt_error:
            send_sms(jwt_error)
    except Exception as json_error:
        send_sms(json_error)
    return str(result)
def send_sms(message):
    """POST *message* to the fatsms HTTP API and echo the payload to stdout.

    NOTE(review): phone number and API key are hard-coded; consider moving
    them to configuration.
    """
    payload = {
        "to_phone": "42659183",
        "api_key": "7893f0d6872d606467a9e0e3a998d8db",
        "message": message,
    }
    requests.post("https://fatsms.com/api-send-sms", data=payload)
    print(str(payload))
##############################
# Development server: paste backend, auto-reload and debug enabled --
# bound to localhost only; do not expose this configuration publicly.
run(host="127.0.0.1", port=4444, debug=True, reloader=True, server="paste")
|
"""
Decoding pipeline.
"""
import argparse
import cPickle
import logging
import numpy
import os
import pprint
import re
import theano
import time
import importlib
from theano import tensor
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
from mcg.models import EncoderDecoder, MultiEncoder, MultiDecoder
from mcg.sampling import gen_sample
from mcg.utils import get_enc_dec_ids, p_, seqs2words, words2seqs
from multiprocessing import Process, Queue
from subprocess import Popen, PIPE
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('translate')
def get_parser():
    """Build and return the command-line parser for the decoding pipeline."""

    def dict_type(ss):
        # "k1:v1, k2:v2" -> {'k1': 'v1', 'k2': 'v2'} with whitespace stripped.
        return dict([map(str.strip, s.split(':'))
                     for s in ss.split(',')])

    parser = argparse.ArgumentParser()
    parser.add_argument('--num-process', '-p', type=int, default=5,
                        help="Number of process to use for decoding")
    parser.add_argument('--config', type=str, default="config",
                        help="model config for translation")
    parser.add_argument('--proto', type=str,
                        help="Model prototype from config")
    # NOTE(review): store_true combined with default=True means this flag can
    # never turn normalization off; kept as-is for backward compatibility.
    parser.add_argument('--normalize', '-n', action="store_true",
                        default=True, help="Normalize with seq-len")
    parser.add_argument('--char-level', '-c', action="store_true",
                        default=False, help="Character level")
    parser.add_argument('--cgs-to-translate', type=lambda s: s.split(','),
                        help='comma separated string of cg names\
                        eg. --cgs-to-translate=fi_en,de_en')
    parser.add_argument('--n-best', type=int, default=1)
    parser.add_argument('--zero-shot', action="store_true", default=False,
                        help="Experimental")
    parser.add_argument('--test', action="store_true", default=False,
                        help="Append _test while decoding")
    parser.add_argument('--gold-files', type=dict_type,
                        help="Groundtruth files (optional), \
                        eg. --gold-files=fi_en:file1,de_en:file2")
    parser.add_argument('--source-files', type=dict_type,
                        help="Source files (optional), \
                        eg. --source-files=fi_en:file1,de_en:file2")
    parser.add_argument("--changes", type=dict_type,
                        help="Changes to config")
    parser.add_argument('model', type=str)
    return parser
def calculate_bleu(bleu_script, trans, gold):
    # Score *trans* (a list of translated sentences) against the *gold*
    # reference file using the multi-bleu perl script; returns the BLEU float.
    # NOTE: this function uses Python 2 print-to-file syntax.
    multibleu_cmd = ['perl', bleu_script, gold, '<']
    mb_subprocess = Popen(multibleu_cmd, stdin=PIPE, stdout=PIPE)
    # Feed the candidate translations to the scorer on stdin, then close so
    # the perl script sees EOF and emits its score line.
    print >> mb_subprocess.stdin, '\n'.join(trans)
    mb_subprocess.stdin.flush()
    mb_subprocess.stdin.close()
    stdout = mb_subprocess.stdout.readline()
    logger.info(stdout)
    # First output line looks like "BLEU = 27.53, ..." -- extract the number.
    out_parse = re.match(r'BLEU = [-.0-9]+', stdout)
    assert out_parse is not None
    bleu_score = float(out_parse.group()[6:])
    mb_subprocess.terminate()
    return bleu_score
def _translate(seq, f_init, f_next, trg_eos_idx, src_sel, trg_sel,
               k, cond_init_trg, normalize, n_best, **kwargs):
    """Beam-search one source sequence and return the best sample(s) + score(s).

    With ``normalize`` the scores are divided by candidate length; with
    ``n_best == 1`` the single best candidate is returned, with ``n_best > 1``
    the top-n candidates.
    """
    src_column = numpy.array(seq).reshape([len(seq), 1])
    sample, score = gen_sample(
        f_init, f_next, x=src_column,
        eos_idx=trg_eos_idx, src_selector=src_sel, trg_selector=trg_sel,
        k=k, maxlen=3 * len(seq), stochastic=False, argmax=False,
        cond_init_trg=cond_init_trg, **kwargs)
    if normalize:
        candidate_lengths = numpy.array([len(cand) for cand in sample])
        score = score / candidate_lengths
    if n_best == 1:
        chosen = numpy.argmin(score)
    elif n_best > 1:
        chosen = numpy.argsort(score)[:n_best]
    else:
        raise ValueError('n_best cannot be negative!')
    return sample[chosen], score[chosen]
def translate_model(queue, rqueue, pid, f_init, f_next, src_sel, trg_sel,
                    trg_eos_idx, k, normalize, cond_init_trg, n_best,
                    **kwargs):
    # Worker-process loop: pull (index, sequence) jobs from *queue*, translate
    # each with _translate, and push (index, sample, scores) onto *rqueue*.
    # A None job is the shutdown sentinel. (Python 2 print statement below.)
    while True:
        req = queue.get()
        if req is None:
            break

        idx, x = req[0], req[1]
        print pid, '-', idx
        seq, scores = _translate(x, f_init, f_next, trg_eos_idx, src_sel,
                                 trg_sel, k, cond_init_trg, normalize, n_best,
                                 **kwargs)

        rqueue.put((idx, seq, scores))

    return
def main(config, model, normalize=False, n_process=5, chr_level=False,
         cgs_to_translate=None, n_best=1, zero_shot=False, test=False):
    # Translate the validation set of every requested computational graph
    # (cg) with the model stored at *model*, optionally in parallel worker
    # processes, and write the outputs (plus BLEU score in the filename for
    # n_best == 1) under config['saveto'].
    # Returns (dict cg_name -> output path, last output path).
    # NOTE(review): the *test* argument is unused inside this function.
    trng = RandomStreams(config['seed'] if 'seed' in config else 1234)
    enc_ids, dec_ids = get_enc_dec_ids(config['cgs'])
    # Extract the iteration number embedded in the model filename, if any.
    iternum = re.search('(?<=iter)[0-9]+', model)

    # Translate only the chosen cgs if they are valid
    if cgs_to_translate is None:
        cgs_to_translate = config['cgs']

    # Check if computational graphs are valid
    if not set(config['cgs']) >= set(cgs_to_translate) and not zero_shot:
        raise ValueError('{} not a subset of {}!'.format(
            cgs_to_translate, config['cgs']))

    # Check if zero shot computational graph is valid
    if zero_shot:
        if len(cgs_to_translate) > 1:
            raise ValueError('Only one cg can be translated for zero shot')
        if p_(cgs_to_translate[0])[0] not in enc_ids or \
                p_(cgs_to_translate[0])[1] not in dec_ids:
            raise ValueError('Zero shot is not valid for {}'
                             .format(cgs_to_translate[0]))
        config['cgs'] += cgs_to_translate

    # Create Theano variables
    floatX = theano.config.floatX
    src_sel = tensor.matrix('src_selector', dtype=floatX)
    trg_sel = tensor.matrix('trg_selector', dtype=floatX)
    x_sampling = tensor.matrix('source', dtype='int64')
    y_sampling = tensor.vector('target', dtype='int64')
    prev_state = tensor.matrix('prev_state', dtype=floatX)

    # Create encoder-decoder architecture
    logger.info('Creating encoder-decoder')
    enc_dec = EncoderDecoder(
        encoder=MultiEncoder(enc_ids=enc_ids, **config),
        decoder=MultiDecoder(**config))

    # Allocate parameters
    enc_dec.init_params()

    # Build sampling models
    logger.info('Building sampling models')
    f_inits, f_nexts, f_next_states = enc_dec.build_sampling_models(
        x_sampling, y_sampling, src_sel, trg_sel, prev_state, trng=trng)

    # Load parameters
    logger.info('Loading parameters')
    enc_dec.load_params(model)

    # Output translation file names to be returned
    translations = {}

    # Iterate over computational graphs
    for cg_name in f_inits.keys():

        enc_name = p_(cg_name)[0]
        dec_name = p_(cg_name)[1]
        enc_idx = enc_ids.index(enc_name)
        dec_idx = dec_ids.index(dec_name)

        f_init = f_inits[cg_name]
        f_next = f_nexts[cg_name]
        f_next_state = f_next_states.get(cg_name, None)

        # For monolingual paths do not perform any translations
        if enc_name == dec_name or cg_name not in cgs_to_translate:
            logger.info('Passing the validation of computational graph [{}]'
                        .format(cg_name))
            continue

        logger.info('Validating computational graph [{}]'.format(cg_name))

        # Change output filename
        if zero_shot:
            config['val_set_outs'][cg_name] += '_zeroShot'

        # Get input and output file names
        source_file = config['val_sets'][cg_name]
        saveto = config['val_set_outs'][cg_name]
        saveto = saveto + '{}_{}'.format(
            '' if iternum is None else '_iter' + iternum.group(),
            'nbest' if n_best > 1 else 'BLEU')

        # pass if output exists
        if len([ff for ff in os.listdir(config['saveto'])
                if ff.startswith(os.path.basename(saveto))]):
            logger.info('Output file {}* exists, skipping'.format(saveto))
            continue

        # Prepare source vocabs and files, make sure special tokens are there
        src_vocab = cPickle.load(open(config['src_vocabs'][enc_name]))
        src_vocab['<S>'] = 0
        src_vocab['</S>'] = config['src_eos_idxs'][enc_name]
        src_vocab['<UNK>'] = config['unk_id']

        # Invert dictionary
        src_ivocab = dict()
        for kk, vv in src_vocab.iteritems():
            src_ivocab[vv] = kk

        # Prepare target vocabs and files, make sure special tokens are there
        trg_vocab = cPickle.load(open(config['trg_vocabs'][dec_name]))
        trg_vocab['<S>'] = 0
        trg_vocab['</S>'] = config['trg_eos_idxs'][dec_name]
        trg_vocab['<UNK>'] = config['unk_id']

        # Invert dictionary
        trg_ivocab = dict()
        for kk, vv in trg_vocab.iteritems():
            trg_ivocab[vv] = kk

        # NOTE: the three helpers below close over `queue`/`rqueue`, which are
        # only bound in the multi-process branch further down; they are only
        # called from that branch.
        def _send_jobs(fname):
            # Enqueue every (index, encoded sequence) from *fname*; returns
            # the number of lines enqueued.
            with open(fname, 'r') as f:
                for idx, line in enumerate(f):
                    x = words2seqs(
                        line, src_vocab,
                        vocab_size=config['src_vocab_sizes'][enc_name],
                        chr_level=chr_level)
                    queue.put((idx, x))
            return idx+1

        def _finish_processes():
            # One None sentinel per worker shuts the pool down.
            for midx in xrange(n_process):
                queue.put(None)

        def _retrieve_jobs(n_samples):
            # Collect results in arrival order, storing them by source index.
            trans = [None] * n_samples
            scores = [None] * n_samples
            for idx in xrange(n_samples):
                resp = rqueue.get()
                trans[resp[0]] = resp[1]
                scores[resp[0]] = resp[2]
                if numpy.mod(idx, 10) == 0:
                    print 'Sample ', (idx+1), '/', n_samples, ' Done'
            return trans, scores

        # Create source and target selector vectors
        src_selector_input = numpy.zeros(
            (1, enc_dec.num_encs)).astype(theano.config.floatX)
        src_selector_input[0, enc_idx] = 1.
        trg_selector_input = numpy.zeros(
            (1, enc_dec.num_decs)).astype(theano.config.floatX)
        trg_selector_input[0, dec_idx] = 1.

        # Actual translation here
        logger.info('Translating ' + source_file + '...')
        val_start_time = time.time()
        if n_process == 1:
            # Sequential path: translate line by line in this process.
            trans = []
            scores = []
            with open(source_file, 'r') as f:
                for idx, line in enumerate(f):
                    if idx % 100 == 0 and idx != 0:
                        logger.info('...translated [{}] lines'.format(idx))
                    seq = words2seqs(
                        line, src_vocab,
                        vocab_size=config['src_vocab_sizes'][enc_name],
                        chr_level=chr_level)
                    _t, _s = _translate(
                        seq, f_init, f_next, trg_vocab['</S>'],
                        src_selector_input, trg_selector_input,
                        config['beam_size'],
                        config.get('cond_init_trg', False),
                        normalize, n_best, f_next_state=f_next_state)
                    trans.append(_t)
                    scores.append(_s)
        else:
            # Create queues
            queue = Queue()
            rqueue = Queue()
            processes = [None] * n_process
            for midx in xrange(n_process):
                processes[midx] = Process(
                    target=translate_model,
                    args=(queue, rqueue, midx, f_init, f_next,
                          src_selector_input, trg_selector_input,
                          trg_vocab['</S>'], config['beam_size'], normalize,
                          config.get('cond_init_trg', False), n_best),
                    kwargs={'f_next_state': f_next_state})
                processes[midx].start()
            n_samples = _send_jobs(source_file)
            trans, scores = _retrieve_jobs(n_samples)
            _finish_processes()
        logger.info("Validation Took: {} minutes".format(
            float(time.time() - val_start_time) / 60.))

        # Prepare translation outputs and calculate BLEU if necessary
        # Note that, translations are post processed for BPE here
        if n_best == 1:
            trans = seqs2words(trans, trg_vocab, trg_ivocab)
            trans = [tt.replace('@@ ', '') for tt in trans]
            bleu_score = calculate_bleu(
                bleu_script=config['bleu_script'], trans=trans,
                gold=config['val_set_grndtruths'][cg_name])
            saveto += '{}'.format(bleu_score)
        else:
            # n-best list output: "source-index|||translation|||score" lines.
            n_best_trans = []
            for idx, (n_best_tr, score_) in enumerate(zip(trans, scores)):
                sentences = seqs2words(n_best_tr, trg_vocab, trg_ivocab)
                sentences = [tt.replace('@@ ', '') for tt in sentences]
                for ids, trans_ in enumerate(sentences):
                    n_best_trans.append(
                        '|||'.join(
                            ['{}'.format(idx), trans_,
                             '{}'.format(score_[ids])]))
            trans = n_best_trans

        # Write to file
        with open(saveto, 'w') as f:
            print >>f, '\n'.join(trans)

        translations[cg_name] = saveto

    # NOTE(review): if no cg was translated, `saveto` is unbound here and this
    # raises NameError -- confirm callers always translate at least one cg.
    return translations, saveto
if __name__ == "__main__":
    args = get_parser().parse_args()
    # Import the config module named on the command line (a trailing .py is
    # stripped so both "config" and "config.py" work).
    configuration = importlib.import_module(
        args.config.split('.')[0] if '.py' in args.config else args.config)
    # args.proto names a callable in that module that returns the config dict.
    config = getattr(configuration, args.proto)().copy()
    if args.changes is not None:
        config.update(args.changes)

    # Set source and gold files in config if provided
    if args.source_files is not None:
        for cg_name, s_file in args.source_files.items():
            config['val_sets'][cg_name] = s_file
            # Output name: "<saveto>/<basename>_test_out" with --test,
            # otherwise "<source file>_validation_out".
            if args.test:
                config['val_set_outs'][cg_name] = \
                    os.path.join(config['saveto'],
                                 os.path.basename(s_file) + '_test_out')
            else:
                config['val_set_outs'][cg_name] = s_file + '_validation_out'

    if args.gold_files is not None:
        for cg_name, g_file in args.gold_files.items():
            config['val_set_grndtruths'][cg_name] = g_file

    logger.info("Model options:\n{}".format(pprint.pformat(config)))
    main(config, args.model, normalize=args.normalize,
         n_process=args.num_process, chr_level=args.char_level,
         cgs_to_translate=args.cgs_to_translate, n_best=args.n_best,
         zero_shot=args.zero_shot, test=args.test)
|
# Generated by Django 2.2.13 on 2020-09-22 19:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make ``address.address_line_2`` nullable."""

    dependencies = [
        ("resources_portal", "0001_initial"),
    ]

    operations = [
        migrations.AlterField(
            model_name="address", name="address_line_2", field=models.TextField(null=True),
        ),
    ]
|
import mnist
from load_data.ILoadSupervised import ILoadSupervised, SupervisedType
__all__ = ["LoadMnist",]

# Alternative dataset locations with the same on-disk layout; not referenced
# by the visible code -- presumably meant to be passed as ``mnist_path``.
fashion_path = 'train_data/Folder_Images_Supervised/fashionmnist'
kuzushiji_path = 'train_data/Folder_Images_Supervised/kuzushiji'
class LoadMnist(ILoadSupervised):
    """Supervised classification loader for MNIST-style IDX datasets."""

    # fashion: mnist_path='train_data\\Folder_Images_Supervised\\fashionmnist'
    def __init__(self, mnist_path='train_data/Folder_Images_Supervised/mnist'):
        self.TYPE = SupervisedType.Classification
        # BUG FIX: mnist.parse_idx parses a single IDX stream and exposes no
        # load_training/load_testing; mnist.MNIST(dir) is the python-mnist
        # reader whose API (load_training, load_testing, display) the rest of
        # this class relies on.
        self.mndata = mnist.MNIST(mnist_path)
        self.XTrain, self.YTrain = self.mndata.load_training()
        self.XTest, self.YTest = self.mndata.load_testing()
        # print(self.mndata.display(images[index]))
        self.headers = [str(i) for i in range(len(self.XTest[0]))]
        self.classes = [str(i) for i in list(set(self.YTrain))]

    def get_classes(self):
        return self.classes

    def get_headers(self):
        return self.headers

    def get_default(self):
        return self.XTrain, self.YTrain

    def get_splited(self):
        return (self.XTrain, self.YTrain), (self.XTest, self.YTest)

    def get_all(self):
        # Train followed by test, as single concatenated lists.
        Xs = list(self.XTrain) + list(self.XTest)
        Ys = list(self.YTrain) + list(self.YTest)
        return Xs, Ys
# call gen_image(x[0]).show()
def gen_image(arr):
    """Render a flat 784-value vector as a 28x28 grayscale image.

    Returns the pyplot module so the caller can .show() or .savefig().
    """
    from matplotlib import pyplot as plt
    import numpy as np
    pixels = (np.reshape(arr, (28, 28)) * 255).astype(np.uint8)
    plt.imshow(pixels, cmap='gray', interpolation='nearest')
    return plt
'''
Created on Aug 23, 2013
@author: Brad
'''
import unittest
from utils.config import Config
class Test(unittest.TestCase):
    """Tests for utils.config.Config."""

    conf = None

    def setUp(self):
        self.conf = Config()

    def tearDown(self):
        self.conf = None

    def test_get_picture_vals_VISUALLY(self):
        '''
        Since these values can be changed at any time, you can visually read
        the src/picam.config file and compare it to this output.
        '''
        print(self.conf.get_picture_vals())

    def test_get_log_vals_VISUALLY(self):
        '''
        Since these values can be changed at any time, you can visually read
        the src/picam.config file and compare it to this output.
        '''
        print(self.conf.get_log_vals())

    def test_get_logging_vals(self):
        '''
        Since these values can be changed at any time, you can visually read
        the src/picam.config file and compare it to this output.
        '''
        self.log_dir = 'some_phony_value'
        expected_value = '../log/'
        for (key, val) in self.conf.get_log_vals():
            setattr(self, key, val)
        self.assertEqual(self.log_dir, expected_value, 'Should be the same values.')

    def test_NameError_in_get_vals(self):
        '''
        If a value other than pictures, video, or logging is used
        in __get_vals(), a NameError Exception should be thrown.
        '''
        # BUG FIX: the original try/except flag bookkeeping passed on BOTH
        # the raised and not-raised paths, so it could never fail.
        # assertRaises states the intent directly and fails when no
        # NameError is raised.
        with self.assertRaises(NameError):
            # Note: you must use this syntax to access a private function.
            self.conf._Config__get_vals('some_bad_value')

    def test_for_NO_NameError_in_get_vals(self):
        '''
        If a valid value (pictures, video, or logging) is used in
        __get_vals(), no NameError Exception should be thrown.
        '''
        # BUG FIX: as above, the original assertions were tautological;
        # an unexpected NameError now fails the test explicitly.
        try:
            # Note: you must use this syntax to access a private function.
            self.conf._Config__get_vals('pictures')
            self.conf._Config__get_vals('video')
            self.conf._Config__get_vals('logging')
        except NameError as ne:
            self.fail('NameError should NOT have been raised: %s' % ne)
if __name__ == "__main__":
    # import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
# Public API of the onegov.ticket package.
from onegov.ticket.handler import Handler, HandlerRegistry
# NOTE(review): instantiated between the imports on purpose -- the model and
# collection modules imported below appear to depend on this registry existing
# at import time (hence the noqa); confirm before reordering.
handlers = HandlerRegistry()  # noqa
from onegov.ticket.model import Ticket
from onegov.ticket.model import TicketPermission
from onegov.ticket.collection import TicketCollection

__all__ = [
    'Handler',
    'handlers',
    'Ticket',
    'TicketCollection',
    'TicketPermission'
]
|
import kornia.augmentation as K
import torch.nn as nn
from torch.nn.modules.utils import _pair
from ..common import (crop_and_resize, get_crop_grid, get_random_crop_bbox,
images2video, video2images)
from ..registry import TRACKERS
from .vanilla_tracker import VanillaTracker
@TRACKERS.register_module()
class UVCTracker(VanillaTracker):
    """Tracker trained by forward-backward patch tracking between two frames.

    Training crops a random patch from the reference frame, locates it in the
    target frame, tracks that crop back to the reference frame, and penalizes
    disagreement between the original and back-tracked crops/boxes.
    (The original docstring, "3D recognizer model framework", looked like a
    copy-paste leftover.)
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Backbone feature-map stride: used throughout to convert between
        # feature-space and image-space coordinates.
        self.stride = self.backbone.output_stride
        if self.train_cfg is not None:
            # Patch size in image pixels, and the same patch in feature cells.
            self.patch_img_size = _pair(self.train_cfg.patch_size)
            self.patch_x_size = _pair(self.train_cfg.patch_size // self.stride)
            if self.train_cfg.get('strong_aug', False):
                self.aug = nn.Sequential(
                    K.RandomRotation(degrees=10),
                    # K.RandomResizedCrop(size=self.patch_size,
                    #                     scale=(0.7, 0.9),
                    #                     ratio=(0.7, 1.3)),
                    K.ColorJitter(
                        brightness=0.2, contrast=0.3, saturation=0.3, hue=0.1))
            else:
                self.aug = nn.Identity()

    def crop_x_from_img(self, img, x, bboxes, crop_first):
        # Two ways to obtain patch features (``bboxes`` are in feature space):
        # crop_first=True: crop the image (bboxes scaled by stride), then
        # extract features from the crop; otherwise crop the feature map
        # directly.
        if crop_first:
            crop_x = self.extract_feat(
                crop_and_resize(img, bboxes * self.stride,
                                self.patch_img_size))
        else:
            crop_x = crop_and_resize(x, bboxes, self.patch_x_size)
        return crop_x

    def forward_train(self, imgs, labels=None):
        """Defines the computation performed at every call when training."""
        # Merge the leading batch-like dims; clips must have exactly 2 frames
        # (reference and target).
        imgs = imgs.reshape((-1, ) + imgs.shape[2:])
        batches, clip_len = imgs.size(0), imgs.size(2)
        assert clip_len == 2
        # Augment per image, extract features, regroup frames into a video.
        x = images2video(
            self.extract_feat(self.aug(video2images(imgs))), clip_len)
        ref_frame = imgs[:, :, 0].contiguous()
        tar_frame = imgs[:, :, 1].contiguous()
        ref_x = x[:, :, 0].contiguous()
        tar_x = x[:, :, 1].contiguous()
        # all bboxes are in feature space
        ref_crop_bboxes, is_center_crop = get_random_crop_bbox(
            batches,
            self.patch_x_size,
            ref_x.shape[2:],
            device=x.device,
            center_ratio=self.train_cfg.center_ratio)
        ref_crop_x = self.crop_x_from_img(ref_frame, ref_x, ref_crop_bboxes,
                                          self.train_cfg.img_as_ref)
        ref_crop_grid = get_crop_grid(ref_frame, ref_crop_bboxes * self.stride,
                                      self.patch_img_size)
        # Forward track: locate the reference crop in the target frame
        # (identity when the crop was a deterministic center crop).
        if is_center_crop:
            tar_bboxes = ref_crop_bboxes
        else:
            tar_bboxes = self.cls_head.get_tar_bboxes(ref_crop_x, tar_x)
        tar_crop_x = self.crop_x_from_img(tar_frame, tar_x, tar_bboxes,
                                          self.train_cfg.img_as_tar)
        # Backward track: locate the target crop back in the reference frame.
        if is_center_crop:
            ref_pred_bboxes = ref_crop_bboxes
        else:
            ref_pred_bboxes = self.cls_head.get_tar_bboxes(tar_crop_x, ref_x)
        ref_pred_crop_x = self.crop_x_from_img(ref_frame, ref_x,
                                               ref_pred_bboxes,
                                               self.train_cfg.img_as_ref_pred)
        ref_pred_crop_grid = get_crop_grid(ref_frame,
                                           ref_pred_bboxes * self.stride,
                                           self.patch_img_size)
        # Cycle-consistency losses between the crops, plus bbox losses between
        # the original and back-tracked reference boxes/grids.
        loss = dict()
        loss.update(self.cls_head.loss(ref_crop_x, tar_crop_x, 'ref_tar'))
        loss.update(self.cls_head.loss(tar_crop_x, ref_pred_crop_x, 'tar_ref'))
        loss['dist_bbox'] = self.cls_head.loss_bbox(
            ref_pred_bboxes / self.patch_x_size[0],
            ref_crop_bboxes / self.patch_x_size[0])
        loss['loss_bbox'] = self.cls_head.loss_bbox(ref_crop_grid,
                                                    ref_pred_crop_grid)
        return loss

    def forward_dummy(self, imgs):
        """Used for computing network FLOPs.

        See ``tools/analysis/get_flops.py``.

        Args:
            imgs (torch.Tensor): Input images.

        Returns:
            Tensor: Class score.
        """
        imgs = imgs.reshape((-1, ) + imgs.shape[2:])
        x = self.extract_feat(imgs)
        outs = (self.cls_head(x), )
        return outs
|
import argparse
import sys
def get_count(bed):
    """
    Get number of unique lines in the bed file. If the chromosome
    (first column) contains any of these substrings, the line is not
    counted.
    """
    ignore_contigs = ('hs', 'GL', 'X', 'Y')
    counted = set()
    # Context manager closes the file; the original leaked the handle.
    with open(bed) as handle:
        for line in handle:
            fields = line.split()
            if not fields:
                continue  # blank line: the original crashed on [0] here
            if any(ic in fields[0] for ic in ignore_contigs):
                continue
            counted.add(line)
    return len(counted)
# I realized how unnecessary this was after the fact...
# Script body: count truth/filtered/true-positive BED files and print a
# TP/FP/TN/FN summary line per filtering method.
parser = argparse.ArgumentParser()
parser.add_argument('--fid', type=str, dest='fid', required=True)
parser.add_argument('--unfiltered', type=str, dest='unfiltered', required=True)
parser.add_argument('--filtered_gt0', type=str, dest='filtered_gt0', required=True)
parser.add_argument('--filtered_gt1', type=str, dest='filtered_gt1', required=True)
parser.add_argument('--filtered_manta_tn', type=str, dest='filtered_manta_tn', required=True)
parser.add_argument('--filtered_gnomad', type=str, dest='filtered_gnomad', required=True)
parser.add_argument('--filtered_1kg', type=str, dest='filtered_1kg', required=True)
parser.add_argument('--truth_set', type=str, dest='truth_set', required=True)
parser.add_argument('--tp_gt0', type=str, dest='tp_gt0', required=True)
parser.add_argument('--tp_gt1', type=str, dest='tp_gt1', required=True)
parser.add_argument('--tp_manta_tn', type=str, dest='tp_manta_tn', required=True)
parser.add_argument('--tp_gnomad', type=str, dest='tp_gnomad', required=True)
parser.add_argument('--tp_1kg', type=str, dest='tp_1kg', required=True)
args = parser.parse_args()
args = vars(args)  # Namespace -> plain dict keyed by dest

truth = get_count(args['truth_set'])
unfiltered = get_count(args['unfiltered'])

# Map each filtered call-set argument to its matching true-positive set.
filtering_methods = {
    'filtered_gt0': 'tp_gt0',
    'filtered_gt1': 'tp_gt1',
    'filtered_manta_tn': 'tp_manta_tn',
    'filtered_gnomad': 'tp_gnomad',
    'filtered_1kg': 'tp_1kg'
}

for filtered_set, tp_set in filtering_methods.items():
    filtered = get_count(args[filtered_set])
    tp = get_count(args[tp_set])
    fp = filtered - tp        # surviving calls that are not true positives
    tn = unfiltered - tp - fp # calls the filter correctly removed
    fn = truth - tp           # truth calls the filter lost
    print('\t'.join(map(str, [args['fid'], filtered_set, tp, fp, tn, fn])))
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
# Software Designer exam -- morning (multiple-choice) questions
class RkpassItem(scrapy.Item):
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image
    optiona = scrapy.Field()  # option A
    optionb = scrapy.Field()  # option B
    optionc = scrapy.Field()  # option C
    optiond = scrapy.Field()  # option D
    answer = scrapy.Field()  # answer
    answeranalysis = scrapy.Field()  # answer explanation
    field = scrapy.Field()  # exam session; 20181 means first half of 2018
    questionNum = scrapy.Field()  # question number
    knowledgeOne = scrapy.Field()  # first-level topic of this question
    knowledgeTwo = scrapy.Field()  # second-level topic of this question
# Software Designer exam -- afternoon questions
class rkpassAfterItem(scrapy.Item):
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image
    optionA = scrapy.Field()  # sub-question A
    optionB = scrapy.Field()  # sub-question B
    optionC = scrapy.Field()  # sub-question C
    optionD = scrapy.Field()  # sub-question D
    optionE = scrapy.Field()  # sub-question E
    optionAanswer = scrapy.Field()  # answer to sub-question A
    optionAanswerImg = scrapy.Field()  # image answer to sub-question A
    optionBanswer = scrapy.Field()  # answer to sub-question B
    optionBanswerImg = scrapy.Field()  # image answer to sub-question B
    optionCanswer = scrapy.Field()  # answer to sub-question C
    optionCanswerImg = scrapy.Field()  # image answer to sub-question C
    optionDanswer = scrapy.Field()  # answer to sub-question D
    optionDanswerImg = scrapy.Field()  # image answer to sub-question D
    optionEanswer = scrapy.Field()  # answer to sub-question E
    optionEanswerImg = scrapy.Field()  # image answer to sub-question E
    field = scrapy.Field()  # exam session
# 网络工程师上午题
class wlMorningItem(scrapy.Item):
question = scrapy.Field() # 题目
questionImg = scrapy.Field() # 题目图片
optiona = scrapy.Field() # 选项A
optionb = scrapy.Field() # 选项B
optionc = scrapy.Field() # 选项C
optiond = scrapy.Field() # 选项D
answer = scrapy.Field() # 答案
answeranalysis = scrapy.Field() # 答案解析
field = scrapy.Field() # 考试场次 20181代表2018上半年考试
questionNum = scrapy.Field() # 题号
knowledgeOne = scrapy.Field() # 当前题目一级分类
knowledgeTwo = scrapy.Field() # 当前题目二级分类
# 信息系统监理师上午题
class xxMorningItem(scrapy.Item):
question = scrapy.Field() # 题目
questionImg = scrapy.Field() # 题目图片
optiona = scrapy.Field() # 选项A
optionb = scrapy.Field() # 选项B
optionc = scrapy.Field() # 选项C
optiond = scrapy.Field() # 选项D
answer = scrapy.Field() # 答案
answeranalysis = scrapy.Field() # 答案解析
field = scrapy.Field() # 考试场次 20181代表2018上半年考试
questionNum = scrapy.Field() # 题号
knowledgeOne = scrapy.Field() # 当前题目一级分类
knowledgeTwo = scrapy.Field() # 当前题目二级分类
# 数据库系统工程师上午题
class sjkMorningItem(scrapy.Item):
question = scrapy.Field() # 题目
questionImg = scrapy.Field() # 题目图片
optiona = scrapy.Field() # 选项A
optionb = scrapy.Field() # 选项B
optionc = scrapy.Field() # 选项C
optiond = scrapy.Field() # 选项D
answer = scrapy.Field() # 答案
answeranalysis = scrapy.Field() # 答案解析
field = scrapy.Field() # 考试场次 20181代表2018上半年考试
questionNum = scrapy.Field() # 题号
knowledgeOne = scrapy.Field() # 当前题目一级分类
knowledgeTwo = scrapy.Field() # 当前题目二级分类
# 软件评测师上午题
class rjpcsMorningItem(scrapy.Item):
question = scrapy.Field() # 题目
questionImg = scrapy.Field() # 题目图片
optiona = scrapy.Field() # 选项A
optionb = scrapy.Field() # 选项B
optionc = scrapy.Field() # 选项C
optiond = scrapy.Field() # 选项D
answer = scrapy.Field() # 答案
answeranalysis = scrapy.Field() # 答案解析
field = scrapy.Field() # 考试场次 20181代表2018上半年考试
questionNum = scrapy.Field() # 题号
knowledgeOne = scrapy.Field() # 当前题目一级分类
knowledgeTwo = scrapy.Field() # 当前题目二级分类
# 嵌入式系统设计师上午题
class qrsMorningItem(scrapy.Item):
question = scrapy.Field() # 题目
questionImg = scrapy.Field() # 题目图片
optiona = scrapy.Field() # 选项A
optionb = scrapy.Field() # 选项B
optionc = scrapy.Field() # 选项C
optiond = scrapy.Field() # 选项D
answer = scrapy.Field() # 答案
answeranalysis = scrapy.Field() # 答案解析
field = scrapy.Field() # 考试场次 20181代表2018上半年考试
questionNum = scrapy.Field() # 题号
knowledgeOne = scrapy.Field() # 当前题目一级分类
knowledgeTwo = scrapy.Field() # 当前题目二级分类
# 电子商务设计师上午题
class dzswMorningItem(scrapy.Item):
question = scrapy.Field() # 题目
questionImg = scrapy.Field() # 题目图片
optiona = scrapy.Field() # 选项A
optionb = scrapy.Field() # 选项B
optionc = scrapy.Field() # 选项C
optiond = scrapy.Field() # 选项D
answer = scrapy.Field() # 答案
answeranalysis = scrapy.Field() # 答案解析
field = scrapy.Field() # 考试场次 20181代表2018上半年考试
questionNum = scrapy.Field() # 题号
knowledgeOne = scrapy.Field() # 当前题目一级分类
knowledgeTwo = scrapy.Field() # 当前题目二级分类
# 多媒体应用设计师上午题
class mediaMorningItem(scrapy.Item):
question = scrapy.Field() # 题目
questionImg = scrapy.Field() # 题目图片
optiona = scrapy.Field() # 选项A
optionb = scrapy.Field() # 选项B
optionc = scrapy.Field() # 选项C
optiond = scrapy.Field() # 选项D
answer = scrapy.Field() # 答案
answeranalysis = scrapy.Field() # 答案解析
field = scrapy.Field() # 考试场次 20181代表2018上半年考试
questionNum = scrapy.Field() # 题号
knowledgeOne = scrapy.Field() # 当前题目一级分类
knowledgeTwo = scrapy.Field() # 当前题目二级分类
# 信息系统管理工程师上午题
class xxxtMorningItem(scrapy.Item):
question = scrapy.Field() # 题目
questionImg = scrapy.Field() # 题目图片
optiona = scrapy.Field() # 选项A
optionb = scrapy.Field() # 选项B
optionc = scrapy.Field() # 选项C
optiond = scrapy.Field() # 选项D
answer = scrapy.Field() # 答案
answeranalysis = scrapy.Field() # 答案解析
field = scrapy.Field() # 考试场次 20181代表2018上半年考试
questionNum = scrapy.Field() # 题号
knowledgeOne = scrapy.Field() # 当前题目一级分类
knowledgeTwo = scrapy.Field() # 当前题目二级分类
# 信息安全工程师上午题
class xxaqMorningItem(scrapy.Item):
question = scrapy.Field() # 题目
questionImg = scrapy.Field() # 题目图片
optiona = scrapy.Field() # 选项A
optionb = scrapy.Field() # 选项B
optionc = scrapy.Field() # 选项C
optiond = scrapy.Field() # 选项D
answer = scrapy.Field() # 答案
answeranalysis = scrapy.Field() # 答案解析
field = scrapy.Field() # 考试场次 20181代表2018上半年考试
questionNum = scrapy.Field() # 题号
knowledgeOne = scrapy.Field() # 当前题目一级分类
knowledgeTwo = scrapy.Field() # 当前题目二级分类
# Morning-session questions for the Systems Integration Project Management Engineer exam.
class xtjcMorningItem(scrapy.Item):
    """Scraped item for one Systems Integration Project Management Engineer morning question."""
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image (if any)
    optiona = scrapy.Field()  # choice A
    optionb = scrapy.Field()  # choice B
    optionc = scrapy.Field()  # choice C
    optiond = scrapy.Field()  # choice D
    answer = scrapy.Field()  # correct answer
    answeranalysis = scrapy.Field()  # answer explanation
    field = scrapy.Field()  # exam session; e.g. 20181 = first half of 2018
    questionNum = scrapy.Field()  # question number
    knowledgeOne = scrapy.Field()  # first-level topic category of this question
    knowledgeTwo = scrapy.Field()  # second-level topic category of this question
# Morning-session questions for the Systems Planning and Management Specialist exam.
class xtghMorningItem(scrapy.Item):
    """Scraped item for one Systems Planning and Management Specialist morning question."""
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image (if any)
    optiona = scrapy.Field()  # choice A
    optionb = scrapy.Field()  # choice B
    optionc = scrapy.Field()  # choice C
    optiond = scrapy.Field()  # choice D
    answer = scrapy.Field()  # correct answer
    answeranalysis = scrapy.Field()  # answer explanation
    field = scrapy.Field()  # exam session; e.g. 20181 = first half of 2018
    questionNum = scrapy.Field()  # question number
    knowledgeOne = scrapy.Field()  # first-level topic category of this question
    knowledgeTwo = scrapy.Field()  # second-level topic category of this question
# Morning-session questions for the Network Planning Designer exam.
class wlghMorningItem(scrapy.Item):
    """Scraped item for one Network Planning Designer morning question."""
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image (if any)
    optiona = scrapy.Field()  # choice A
    optionb = scrapy.Field()  # choice B
    optionc = scrapy.Field()  # choice C
    optiond = scrapy.Field()  # choice D
    answer = scrapy.Field()  # correct answer
    answeranalysis = scrapy.Field()  # answer explanation
    field = scrapy.Field()  # exam session; e.g. 20181 = first half of 2018
    questionNum = scrapy.Field()  # question number
    knowledgeOne = scrapy.Field()  # first-level topic category of this question
    knowledgeTwo = scrapy.Field()  # second-level topic category of this question
# Morning-session questions for the Systems Architect exam.
class xtjgMorningItem(scrapy.Item):
    """Scraped item for one Systems Architect morning question."""
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image (if any)
    optiona = scrapy.Field()  # choice A
    optionb = scrapy.Field()  # choice B
    optionc = scrapy.Field()  # choice C
    optiond = scrapy.Field()  # choice D
    answer = scrapy.Field()  # correct answer
    answeranalysis = scrapy.Field()  # answer explanation
    field = scrapy.Field()  # exam session; e.g. 20181 = first half of 2018
    questionNum = scrapy.Field()  # question number
    knowledgeOne = scrapy.Field()  # first-level topic category of this question
    knowledgeTwo = scrapy.Field()  # second-level topic category of this question
# Morning-session questions for the Systems Analyst exam.
class xtfxMorningItem(scrapy.Item):
    """Scraped item for one Systems Analyst morning question."""
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image (if any)
    optiona = scrapy.Field()  # choice A
    optionb = scrapy.Field()  # choice B
    optionc = scrapy.Field()  # choice C
    optiond = scrapy.Field()  # choice D
    answer = scrapy.Field()  # correct answer
    answeranalysis = scrapy.Field()  # answer explanation
    field = scrapy.Field()  # exam session; e.g. 20181 = first half of 2018
    questionNum = scrapy.Field()  # question number
    knowledgeOne = scrapy.Field()  # first-level topic category of this question
    knowledgeTwo = scrapy.Field()  # second-level topic category of this question
# Morning-session questions for the Information Systems Project Manager exam.
class xtxtxmMorningItem(scrapy.Item):
    """Scraped item for one Information Systems Project Manager morning question."""
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image (if any)
    optiona = scrapy.Field()  # choice A
    optionb = scrapy.Field()  # choice B
    optionc = scrapy.Field()  # choice C
    optiond = scrapy.Field()  # choice D
    answer = scrapy.Field()  # correct answer
    answeranalysis = scrapy.Field()  # answer explanation
    field = scrapy.Field()  # exam session; e.g. 20181 = first half of 2018
    questionNum = scrapy.Field()  # question number
    knowledgeOne = scrapy.Field()  # first-level topic category of this question
    knowledgeTwo = scrapy.Field()  # second-level topic category of this question
# Morning-session questions for the Programmer exam.
class cxyMorningItem(scrapy.Item):
    """Scraped item for one Programmer morning question."""
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image (if any)
    optiona = scrapy.Field()  # choice A
    optionb = scrapy.Field()  # choice B
    optionc = scrapy.Field()  # choice C
    optiond = scrapy.Field()  # choice D
    answer = scrapy.Field()  # correct answer
    answeranalysis = scrapy.Field()  # answer explanation
    field = scrapy.Field()  # exam session; e.g. 20181 = first half of 2018
    questionNum = scrapy.Field()  # question number
    knowledgeOne = scrapy.Field()  # first-level topic category of this question
    knowledgeTwo = scrapy.Field()  # second-level topic category of this question
# Morning-session questions for the Information Processing Technician exam.
class xxclMorningItem(scrapy.Item):
    """Scraped item for one Information Processing Technician morning question."""
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image (if any)
    optiona = scrapy.Field()  # choice A
    optionb = scrapy.Field()  # choice B
    optionc = scrapy.Field()  # choice C
    optiond = scrapy.Field()  # choice D
    answer = scrapy.Field()  # correct answer
    answeranalysis = scrapy.Field()  # answer explanation
    field = scrapy.Field()  # exam session; e.g. 20181 = first half of 2018
    questionNum = scrapy.Field()  # question number
    knowledgeOne = scrapy.Field()  # first-level topic category of this question
    knowledgeTwo = scrapy.Field()  # second-level topic category of this question
# Morning-session questions for the Network Administrator exam.
class wlglMorningItem(scrapy.Item):
    """Scraped item for one Network Administrator morning question."""
    question = scrapy.Field()  # question text
    questionImg = scrapy.Field()  # question image (if any)
    optiona = scrapy.Field()  # choice A
    optionb = scrapy.Field()  # choice B
    optionc = scrapy.Field()  # choice C
    optiond = scrapy.Field()  # choice D
    answer = scrapy.Field()  # correct answer
    answeranalysis = scrapy.Field()  # answer explanation
    field = scrapy.Field()  # exam session; e.g. 20181 = first half of 2018
    questionNum = scrapy.Field()  # question number
    knowledgeOne = scrapy.Field()  # first-level topic category of this question
    knowledgeTwo = scrapy.Field()  # second-level topic category of this question
# Copyright 2020 The TensorFlow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the opengl rasterizer op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import six
import tensorflow as tf
from tensorflow_graphics.geometry.transformation import look_at
from tensorflow_graphics.rendering.camera import perspective
from tensorflow_graphics.rendering.opengl import gen_rasterizer_op as rasterizer
from tensorflow_graphics.util import test_case
# Empty vertex shader: all geometry is generated from the mesh buffer in the
# geometry shader below, so the vertex stage has nothing to do.
test_vertex_shader = """
#version 460
void main() { }
"""
# Geometry shader that projects the vertices of visible triangles onto the image
# plane.
test_geometry_shader = """
#version 460
uniform mat4 view_projection_matrix;
layout(points) in;
layout(triangle_strip, max_vertices=3) out;
out layout(location = 0) vec3 position;
out layout(location = 1) vec3 normal;
out layout(location = 2) vec2 bar_coord;
out layout(location = 3) float tri_id;
in int gl_PrimitiveIDIn;
layout(binding=0) buffer triangular_mesh { float mesh_buffer[]; };
vec3 get_vertex_position(int i) {
int o = gl_PrimitiveIDIn * 9 + i * 3;
return vec3(mesh_buffer[o + 0], mesh_buffer[o + 1], mesh_buffer[o + 2]);
}
bool is_back_facing(vec3 v0, vec3 v1, vec3 v2) {
vec4 tv0 = view_projection_matrix * vec4(v0, 1.0);
vec4 tv1 = view_projection_matrix * vec4(v1, 1.0);
vec4 tv2 = view_projection_matrix * vec4(v2, 1.0);
tv0 /= tv0.w;
tv1 /= tv1.w;
tv2 /= tv2.w;
vec2 a = (tv1.xy - tv0.xy);
vec2 b = (tv2.xy - tv0.xy);
return (a.x * b.y - b.x * a.y) <= 0;
}
void main() {
vec3 v0 = get_vertex_position(0);
vec3 v1 = get_vertex_position(1);
vec3 v2 = get_vertex_position(2);
// Cull back-facing triangles.
if (is_back_facing(v0, v1, v2)) {
return;
}
normal = normalize(cross(v1 - v0, v2 - v0));
vec3 positions[3] = {v0, v1, v2};
for (int i = 0; i < 3; ++i) {
// gl_Position is a pre-defined size 4 output variable
gl_Position = view_projection_matrix * vec4(positions[i], 1);
bar_coord = vec2(i==0 ? 1 : 0, i==1 ? 1 : 0);
tri_id = gl_PrimitiveIDIn;
position = positions[i];
EmitVertex();
}
EndPrimitive();
}
"""
# Fragment shader that packs barycentric coordinates, triangle index, and depth
# map in a resulting vec4 per pixel.
test_fragment_shader = """
#version 420
in layout(location = 0) vec3 position;
in layout(location = 1) vec3 normal;
in layout(location = 2) vec2 bar_coord;
in layout(location = 3) float tri_id;
out vec4 output_color;
void main() {
output_color = vec4(bar_coord, tri_id, position.z);
}
"""
class RasterizerOPTest(test_case.TestCase):
  """Exercises the OpenGL rasterizer op: output correctness and input checks."""

  def test_rasterize(self):
    """Rasterizes batched triangles at known depths and checks the output."""
    max_depth = 10
    min_depth = 2
    height = 480
    width = 640
    camera_origin = (0.0, 0.0, 0.0)
    camera_up = (0.0, 1.0, 0.0)
    look_at_point = (0.0, 0.0, 1.0)
    # BUGFIX: `np.math` was an undocumented alias of the stdlib math module,
    # deprecated and removed in recent NumPy releases; use np.pi instead.
    fov = (60.0 * np.pi / 180,)
    near_plane = (1.0,)
    far_plane = (10.0,)
    batch_shape = tf.convert_to_tensor(
        value=(2, (max_depth - min_depth) // 2), dtype=tf.int32)
    world_to_camera = look_at.right_handed(camera_origin, look_at_point,
                                           camera_up)
    perspective_matrix = perspective.right_handed(
        fov, (float(width) / float(height),), near_plane, far_plane)
    view_projection_matrix = tf.matmul(perspective_matrix, world_to_camera)
    view_projection_matrix = tf.squeeze(view_projection_matrix)
    # Generate triangles at different depths and associated ground truth.
    tris = np.zeros((max_depth - min_depth, 9), dtype=np.float32)
    gt = np.zeros((max_depth - min_depth, height, width, 2), dtype=np.float32)
    for idx in range(max_depth - min_depth):
      tris[idx, :] = (-100.0, 100.0, idx + min_depth, 100.0, 100.0,
                      idx + min_depth, 0.0, -100.0, idx + min_depth)
      gt[idx, :, :, :] = (0, idx + min_depth)
    # Broadcast the variables to the full batch shape.
    render_parameters = {
        "view_projection_matrix":
            ("mat",
             tf.broadcast_to(
                 input=view_projection_matrix,
                 shape=tf.concat(
                     values=(batch_shape,
                             tf.shape(input=view_projection_matrix)[-2:]),
                     axis=0))),
        "triangular_mesh":
            ("buffer",
             tf.reshape(
                 tris, shape=tf.concat(values=(batch_shape, (9,)), axis=0)))
    }
    # Reshape the ground truth to match the batched output.
    gt = tf.reshape(
        gt, shape=tf.concat(values=(batch_shape, (height, width, 2)), axis=0))
    render_parameters = list(six.iteritems(render_parameters))
    variable_names = [v[0] for v in render_parameters]
    variable_kinds = [v[1][0] for v in render_parameters]
    variable_values = [v[1][1] for v in render_parameters]

    def rasterize():
      return rasterizer.rasterize(
          num_points=3,
          variable_names=variable_names,
          variable_kinds=variable_kinds,
          variable_values=variable_values,
          output_resolution=(width, height),
          vertex_shader=test_vertex_shader,
          geometry_shader=test_geometry_shader,
          fragment_shader=test_fragment_shader,
      )

    result = rasterize()
    # Channels 2:4 carry (triangle id, depth) as written by the fragment shader.
    self.assertAllClose(result[..., 2:4], gt)

    @tf.function
    def check_lazy_shape():
      # Within @tf.function, the tensor shape is determined by SetShapeFn
      # callback. Ensure that the shape of non-batch axes matches that of
      # the actual tensor evaluated in eager mode above.
      lazy_shape = rasterize().shape
      self.assertEqual(lazy_shape[-3:], list(result.shape)[-3:])

    check_lazy_shape()

  @parameterized.parameters(
      ("The variable names, kinds, and values must have the same size.",
       ["var1"], ["buffer", "buffer"], [[1.0], [1.0]],
       tf.errors.InvalidArgumentError, ValueError),
      ("The variable names, kinds, and values must have the same size.",
       ["var1", "var2"], ["buffer"], [[1.0], [1.0]],
       tf.errors.InvalidArgumentError, ValueError),
      ("The variable names, kinds, and values must have the same size.",
       ["var1", "var2"], ["buffer", "buffer"], [[1.0]],
       tf.errors.InvalidArgumentError, ValueError),
      ("has an invalid batch", ["var1", "var2"], ["buffer", "buffer"],
       [[1.0], [[1.0]]], tf.errors.InvalidArgumentError, ValueError),
      ("has an invalid", ["var1"], ["mat"], [[1.0]],
       tf.errors.InvalidArgumentError, ValueError),
      ("has an invalid", ["var1"], ["buffer"], [1.0],
       tf.errors.InvalidArgumentError, ValueError),
  )
  def test_invalid_variable_inputs(self, error_msg, variable_names,
                                   variable_kinds, variable_values, error_eager,
                                   error_graph_mode):
    """Checks that malformed variable inputs raise the expected errors."""
    height = 1
    width = 1
    empty_shader_code = "#version 460\n void main() { }\n"
    # Eager mode surfaces the kernel error; graph mode fails at trace time.
    if tf.executing_eagerly():
      error = error_eager
    else:
      error = error_graph_mode
    # BUGFIX: assertRaisesRegexp is a long-deprecated unittest alias; the
    # supported spelling is assertRaisesRegex.
    with self.assertRaisesRegex(error, error_msg):
      self.evaluate(
          rasterizer.rasterize(
              num_points=0,
              variable_names=variable_names,
              variable_kinds=variable_kinds,
              variable_values=variable_values,
              output_resolution=(width, height),
              vertex_shader=empty_shader_code,
              geometry_shader=empty_shader_code,
              fragment_shader=empty_shader_code))
if __name__ == "__main__":
  # Delegate to the shared TF-Graphics test runner.
  test_case.main()
|
import requests
import urllib.request
import time
import discord
from bs4 import BeautifulSoup
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
def get_first_anchor_url(bs_element):
    """Return the href of the first <a> inside *bs_element*, or None if absent."""
    anchors = bs_element.select("a")
    return anchors[0].get("href") if anchors else None
class WikipediaScraper(object):
    """Scrapes the 'Topics in the news' box of Wikipedia's Current Events portal."""

    name = "Wikipedia"

    def __init__(self):
        self.url = "https://en.wikipedia.org/wiki/Portal:Current_events"

    def get_headlines(self, **kwargs):
        """Return a list of {'title', 'summary', 'url'} dicts, one per headline.

        Returns [] when the expected page structure cannot be found.
        """
        response = requests.get(self.url)
        soup = BeautifulSoup(response.text, "html.parser")
        divs = soup.select("div")
        # The news box is identified by its aria-labelledby attribute.
        headlines_div = list(filter(lambda div: div.get("aria-labelledby") == "Topics_in_the_news", divs))
        if len(headlines_div) == 0:
            return []
        headline_div = headlines_div[0]
        headlines_ul = headline_div.select("ul")
        if len(headlines_ul) == 0:
            return []
        headlines_ul = headlines_ul[0]
        headlines = headlines_ul.select("li")
        headline_dicts = []
        for headline in headlines:
            # BUGFIX: the original dereferenced anchors[0] unconditionally and
            # crashed with AttributeError (anchor stayed None) when a headline
            # had no <a>. Reuse the shared helper, consistent with the other
            # scrapers in this module; url is simply None in that case.
            headline_dicts.append({
                "title": headline.text,
                "summary": headline.text,
                "url": get_first_anchor_url(headline)
            })
        return headline_dicts
class CNBCScraper(object):
    """Scrapes the 'Latest News' headlines from the CNBC front page."""

    name = "CNBC"

    def __init__(self):
        self.url = "https://www.cnbc.com/"

    def get_headlines(self, **kwargs):
        """Return a list of {'title', 'summary', 'url'} dicts, one per headline."""
        page = requests.get(self.url)
        soup = BeautifulSoup(page.text, "html.parser")
        # Headlines are the divs carrying the LatestNews-headline CSS class.
        headline_divs = [
            div for div in soup.select("div")
            if "LatestNews-headline" in div.get("class", [])
        ]
        if not headline_divs:
            return []
        results = []
        for div in headline_divs:
            link = None
            link_tags = div.select("a")
            if link_tags:
                link = link_tags[0].get("href")
            results.append({
                "title": div.text,
                "summary": div.text,
                "url": link
            })
        return results
class EkantipurScraper(object):
    """Scrapes front-page article headlines from ekantipur.com."""

    name = "Ekantipur"

    def __init__(self):
        self.url = "https://ekantipur.com/"

    def get_headlines(self, **kwargs):
        """Return a list of {'title', 'summary', 'url'} dicts, one per article."""
        page = requests.get(self.url)
        soup = BeautifulSoup(page.text, "html.parser")
        # Articles live inside the <section> carrying the listLayout class.
        list_sections = [
            sec for sec in soup.select("section")
            if "listLayout" in sec.get("class", [])
        ]
        if not list_sections:
            return []
        results = []
        for article in list_sections[0].select("article"):
            link = None
            link_tags = article.select("a")
            if link_tags:
                link = link_tags[0].get("href")
            results.append({
                "title": article.text,
                "summary": article.text,
                "url": link
            })
        return results
class VzgalyadScraper(object):
    """Scrapes headlines from vz.ru, optionally machine-translating to English."""

    name = "Vzglyad"

    def __init__(self):
        self.url = "https://vz.ru/"
        # Russian -> English translation model, loaded once per instance.
        model_name = 'Helsinki-NLP/opus-mt-ru-en'
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

    def translate_to_en(self, text_lines_ru):
        """Translate a list of Russian strings to English, preserving order.

        :param text_lines_ru: list of Russian source strings.
        :return: list of translated English strings.
        """
        src_text_lines = []
        for text_line_ru in text_lines_ru:
            # BUGFIX: the original interpolated the whole list
            # (`{text_lines_ru}`) into every prompt instead of the
            # individual line, garbling the model input.
            src_text = f">>en<< {text_line_ru}"
            src_text_lines.append(src_text)
        tokenized_translated = self.model.generate(**self.tokenizer.prepare_seq2seq_batch(src_text_lines, return_tensors="pt"))
        translated_lines = [self.tokenizer.decode(t, skip_special_tokens=True) for t in tokenized_translated]
        return translated_lines

    def _collect(self, news_divs, heading_tag, translate):
        """Yield (headline_text, url) pairs for one group of headline divs."""
        for news_div in news_divs:
            anchor_url = get_first_anchor_url(news_div)
            headline_text = news_div.select(heading_tag)[0].text.strip()
            if translate:
                headline_text = self.translate_to_en([headline_text])[0]
            # Separator entries render as a run of dashes; skip them.
            if headline_text.find("------------------") == -1:
                yield headline_text, anchor_url

    def get_headlines(self, translate_to_en=False):
        """Return {'title', 'summary', 'url'} dicts for main and other news."""
        response = requests.get(self.url)
        soup = BeautifulSoup(response.text, "html.parser")
        divs = soup.select("div")
        mainnews_divs = list(filter(lambda d: "mainnews" in d.get("class", []), divs))
        othernews_divs = list(filter(lambda d: "othnews" in d.get("class", []), divs))
        if len(mainnews_divs) == 0 and len(othernews_divs) == 0:
            return []
        # Main news headlines use <h1>, secondary ("other") news use <h4>.
        pairs = list(self._collect(mainnews_divs, "h1", translate_to_en))
        pairs += list(self._collect(othernews_divs, "h4", translate_to_en))
        # NOTE: a leftover debug print(headline_texts) was removed here.
        return [
            {"title": text, "summary": text, "url": url}
            for text, url in pairs
        ]
|
def insertionSort(b):
    """Sort the list *b* in place with insertion sort and return it."""
    for idx in range(1, len(b)):
        key = b[idx]
        pos = idx
        # Shift larger elements one slot right to open a gap for key.
        while pos > 0 and b[pos - 1] > key:
            b[pos] = b[pos - 1]
            pos -= 1
        b[pos] = key
    return b
def bucketSort(A):
    """Bucket sort for floats in [0, 1]; sorts A in place and returns it.

    Values are scattered into len(A) buckets by magnitude, each bucket is
    sorted, and the buckets are concatenated back into A.
    """
    n = len(A)
    buckets = [[] for _ in range(n)]
    for value in A:
        # BUGFIX: clamp the index so value == 1.0 lands in the last bucket
        # instead of raising IndexError (int(n * 1.0) == n is out of range).
        buckets[min(int(n * value), n - 1)].append(value)
    for bucket in buckets:
        # Idiom: the stdlib in-place sort replaces the hand-rolled
        # insertionSort; the resulting order is identical.
        bucket.sort()
    k = 0
    for bucket in buckets:
        for value in bucket:
            A[k] = value
            k += 1
    return A
# Demo: sort a sample list of floats in [0, 1) and show the result.
x = [0.77, 0.16, 0.39, 0.26, 0.71, 0.95, 0.21, 0.12, 0.23, 0.68]
print("Sorted Array is:", bucketSort(x))
## OUTPUT:
'''
Sorted Array is: [0.12, 0.16, 0.21, 0.23, 0.26, 0.39, 0.68, 0.71, 0.77, 0.95]
'''
|
import torch
from typing import List, Tuple
from base.base_trainer import BaseTrainer
from utils.logger import setup_logger
from tqdm.auto import tqdm
logger = setup_logger(__name__)
class Trainer(BaseTrainer):
    """Concrete trainer: runs per-epoch training and evaluation loops.

    Collects per-batch loss/accuracy during training and per-epoch
    loss/accuracy during evaluation, logging a summary after each epoch.
    """

    def __init__(
        self,
        model,
        optimizer,
        loss,
        config,
        device,
        train_loader,
        test_loader,
        lr_scheduler=None,
        batch_scheduler=False,
    ):
        """Store the data loaders and scheduler on top of BaseTrainer state.

        Args:
            model, optimizer, loss, config, device: forwarded to BaseTrainer.
            train_loader: DataLoader yielding (data, target) training batches.
            test_loader: DataLoader yielding (data, target) eval batches.
            lr_scheduler: optional LR scheduler; OneCycleLR is stepped per
                batch, StepLR once per epoch (see _train_epoch).
            batch_scheduler: flag kept for callers; not consulted here —
                presumably indicates per-batch stepping. TODO confirm.
        """
        super().__init__(model, optimizer, loss, config, device)
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.lr_scheduler = lr_scheduler
        self.batch_scheduler = batch_scheduler

    def _train_epoch(self, epoch: int) -> Tuple[List[float], List[float]]:
        """Train one epoch; return per-batch (losses, accuracies).

        Note: the return annotation was corrected from `List[Tuple]` — the
        method returns a tuple of two lists, not a list of tuples.
        """
        loss_coll: List[float] = []
        accuracy_coll: List[float] = []
        self.model.train()
        train_loss = 0
        total = 0
        processed = 0
        correct = 0
        pbar = tqdm(self.train_loader)
        for batch_idx, (data, target) in enumerate(pbar):
            data = data.to(self.device)
            target = target.to(self.device)
            self.optimizer.zero_grad()
            output = self.model(data)
            loss = self.criterion(output, target)
            loss.backward()
            self.optimizer.step()
            train_loss += loss.item()
            # Prediction = argmax over class logits.
            _, predicted = output.max(1)
            total += target.size(0)
            correct += predicted.eq(target).sum().item()
            processed += len(data)
            pbar.set_description(
                desc=f"Loss={loss.item():0.2f} Batch_id={batch_idx} Accuracy: {100*correct/processed:0.2f}% "
            )
            accuracy_coll.append(100 * correct / processed)
            loss_coll.append(loss.data.cpu().numpy().item())
            # OneCycleLR is a per-batch scheduler.
            if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.OneCycleLR):
                self.lr_scheduler.step()
            # NOTE(review): emptying the CUDA cache every batch is costly;
            # kept for behavioral parity — confirm it is really needed.
            torch.cuda.empty_cache()
        # StepLR steps once per epoch.
        if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.StepLR):
            self.lr_scheduler.step()
        # BUGFIX: the log previously interpolated `{correct/total}` (a ratio)
        # where the "(correct/total)" counts were clearly intended.
        logger.info(
            f"Train Set: Epoch {epoch}, Average Loss {train_loss/len(self.train_loader):.5f}, Accuracy: {100*correct/total}% ({correct}/{total})"
        )
        return (loss_coll, accuracy_coll)

    def _test_epoch(self, epoch: int) -> Tuple[List[float], List[float], float]:
        """Evaluate one epoch; return ([avg_loss], [accuracy], total_loss).

        Note: the return annotation was corrected from `List[Tuple]` — the
        method returns a 3-tuple.
        """
        loss_coll: List[float] = []
        accuracy_coll: List[float] = []
        self.model.eval()
        test_loss = 0
        total = 0
        correct = 0
        with torch.no_grad():
            for batch_idx, (data, target) in enumerate(self.test_loader):
                data = data.to(self.device)
                target = target.to(self.device)
                output = self.model(data)
                loss = self.criterion(output, target)
                test_loss += loss.item()
                _, predicted = output.max(1)
                total += target.size(0)
                correct += predicted.eq(target).sum().item()
        # BUGFIX: same `{correct/total}` -> `{correct}/{total}` fix as above.
        logger.info(
            f"Test Set: Epoch {epoch}, Average Loss {test_loss/len(self.test_loader):.5f}, Accuracy: {100*correct/total}% ({correct}/{total})"
        )
        loss_coll.append(test_loss / len(self.test_loader))
        accuracy_coll.append(100 * correct / total)
        return (loss_coll, accuracy_coll, test_loss)
|
"""Basic HTTP Server with dynamic page generation"""
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler, HTTPServer
class Content:
    """Pairs an HTTP response body template with its MIME type."""

    def __init__(self, template, mime):
        self._template = template
        self.mime = mime
        # Until substitute() is called, the body is the raw template.
        self.content = template

    def substitute(self, **kwargs):
        """Render the template via str.format and return self for chaining."""
        self.content = self._template.format(**kwargs)
        return self
class MyLittleJoomla(BaseHTTPRequestHandler):
    """Simple static webserver: serves a generated page, its CSS and its JS."""

    # Outer HTML shell; per-request markup is injected via the {content} slot.
    page_tpl = Content(r"""
    <!doctype html>
    <html>
    <head>
    <link rel="stylesheet" type="text/css" href="/style.css" />
    <script src="/script.js"></script>
    </head>
    <body>
    <article>{content}</article>
    </body>
    </html>
    """, 'text/html')
    # 404 body; {path} is filled with the requested path.
    not_found = Content(r"""<h1>"{path}" not found!</h1>""", 'text/html')
    # Landing-page article body served for "/".
    content = Content(r"""
    <h1>Philippov rules!</h1>
    <p>Path: "{path}"</p>
    <p>
    This is a simple example.
    </p>
    <p>See also:
    <ul>
    <li><a href="_/bump">Bump</a></li>
    <li><a href="_/jump">Jump</a></li>
    <li><a href="nope">Nope</a></li>
    </ul>
    </p>
    """, 'text/html')
    # Stylesheet served at /style.css. NOTE: never passed through
    # substitute(), so its literal CSS braces are safe from str.format.
    style = Content(r"""
    body {
    border: 0;
    font-family: Helvetica, Arial, sans-serif;
    margin: 0;
    padding: 0;
    }
    article {
    border: 1px solid #ddd;
    margin: 2em auto;
    padding: 1em;
    position: relative;
    width: 80%;
    }
    """, 'text/css')
    # Script served at /script.js: periodically nudges the article element.
    script = Content(r"""
    document.addEventListener('DOMContentLoaded', function(e){
    var article = document.querySelector("article");
    var moveOn = function(){
    article.style.left = ".1em";
    };
    var moveBack = function(){
    article.style.left = "0";
    };
    setInterval(function(){
    moveOn();
    setTimeout(moveBack, Math.random() * 1000)
    }, Math.random() * 10000);
    });
    """, 'application/javascript')

    def do_GET(self):
        """Handle GET: dispatch on path, then write status, MIME type and body."""
        code, content = self.dispatch(self.path)
        self.send_response(code)
        self.send_header('content-type', content.mime)
        self.end_headers()
        self.wfile.write(bytes(content.content, 'utf8'))

    def dispatch(self, path) -> "tuple[HTTPStatus, Content]":
        """Map a request path to an (HTTP status, Content) pair."""
        if path == '/':
            # Home page: render the article body, then wrap it in the shell.
            return HTTPStatus.OK, self.page_tpl.substitute(
                content=self.content.substitute(path=path).content)
        if path.startswith('/_/'):
            # "/_/<anything>" echoes the raw path inside the page shell.
            return HTTPStatus.OK, self.page_tpl.substitute(content=path)
        elif path == '/style.css':
            return HTTPStatus.OK, self.style
        elif path == '/script.js':
            return HTTPStatus.OK, self.script
        # Anything else: wrap the 404 body in the page shell.
        return HTTPStatus.NOT_FOUND, self.page_tpl.substitute(
            content=self.not_found.substitute(path=path).content)
if __name__ == '__main__':
    # Serve on localhost:8888 until interrupted.
    addr = ('localhost', 8888)
    server = HTTPServer(addr, MyLittleJoomla)
    print('Started at http://{}:{}'.format(addr[0], addr[1]))
    server.serve_forever()
|
from classes_and_objects.exe.to_do_list.project.task import Task
class Section:
    """A named group of tasks within a to-do list."""

    def __init__(self, name: str):
        self.name = name
        self.tasks = []  # Task objects currently in this section

    def add_task(self, new_task):
        """Add new_task unless a task with the same name already exists."""
        for task in self.tasks:
            if task.name == new_task.name:
                # BUGFIX: message previously read "is already Taskin the
                # section" (garbled); restored the intended wording.
                return f"Task is already in the section {self.name}"
        self.tasks.append(new_task)
        return f"Task {new_task.details()} is added to the section"

    def complete_task(self, task_name: str):
        """Mark the first task named task_name as completed."""
        for task in self.tasks:
            if task.name == task_name:
                task.completed = True
                return f"Completed task {task_name}"
        return f"Could not find task with the name {task_name}"

    def clean_section(self):
        """Remove all completed tasks and report how many were removed.

        BUGFIX: the original removed items from self.tasks while iterating
        it, which skips the element following each removal, so consecutive
        completed tasks were left behind. Rebuild the list instead.
        """
        initial_count = len(self.tasks)
        self.tasks = [task for task in self.tasks if not task.completed]
        return f"Cleared {initial_count - len(self.tasks)} tasks."

    def view_section(self):
        """Return a printable listing of the section and its tasks."""
        result = f"Section {self.name}:\n"
        for task in self.tasks:
            result += task.details() + "\n"
        return result
from sweepstakes_stack_manager import SweepstakesStackManager
from sweepstakes_queue_manager import SweepstakesQueueManager
import user_interface
class MarketingFirmCreator:
    """Owns one queue-based and one stack-based sweepstakes manager and lets
    the user pick which one to work with."""

    def __init__(self):
        self.queue = SweepstakesQueueManager()
        self.stack = SweepstakesStackManager()

    def choose_manager(self):
        """Prompt the user and return the chosen manager, or None.

        BUGFIX: the original compared the result of get_string_input —
        presumably a string, per its name (TODO confirm) — against the ints
        1 and 2, so neither branch could ever match; it also constructed a
        brand-new manager and immediately discarded it. Compare against
        string values (str() keeps int inputs working too) and return the
        pre-built manager instance.
        """
        choice = user_interface.get_string_input('Please choose manager type: Press: [1] for Queue or Press: [2] for Stack.')
        if str(choice).strip() == '1':
            return self.queue
        elif str(choice).strip() == '2':
            return self.stack
        else:
            return None
|
from dataclasses import dataclass
import json
import typing as t
from .ninja import EncryptedImageNinja
@dataclass()
class Password:
    """A single stored credential: display name, login and passphrase."""

    name: str
    login: str
    passphrase: str

    def __str__(self) -> str:
        # Rendered as "name(login)"; the passphrase is deliberately omitted.
        return f"{self.name}({self.login})"
class ImageVault:
    """Stores a list of Password entries steganographically inside an image."""

    passwords: t.List[Password]

    def __init__(self, path: str, password: str, for_write=False):
        self.path = path
        self.image_ninja = EncryptedImageNinja(self.path, password=password)
        self.passwords = []
        if not for_write:
            # Read mode: decode the password list already hidden in the image.
            self.passwords = self._from_bytes(self.image_ninja.read_message())

    @classmethod
    def _to_bytes(cls, passwords: t.List[Password]) -> bytes:
        """Serialize passwords as a JSON array of [name, login, passphrase]."""
        assert isinstance(passwords, list) and all(
            isinstance(p, Password) for p in passwords
        ), f"Expected `{repr(passwords)}` to be of type List[Password]"
        rows = [[entry.name, entry.login, entry.passphrase] for entry in passwords]
        return json.dumps(rows).encode()

    @classmethod
    def _from_bytes(cls, data: bytes) -> t.List[Password]:
        """Deserialize the JSON produced by _to_bytes back into Passwords."""
        data = json.loads(data.decode())
        assert isinstance(data, list) and all(
            isinstance(p, list) and len(p) == 3 for p in data
        ), f"Expected `{repr(data)}` to be of type List[List[3]]"
        return [Password(*row) for row in data]

    def save(self, path: str = None):
        """Hide the serialized passwords in the image and write it to disk."""
        self.image_ninja.hide_message(self._to_bytes(self.passwords))
        self.image_ninja.save(path or self.path)
# NOTE(review): removed a stray no-op integer literal (`123`) that was left
# at module level; it evaluated to nothing and served no purpose.
|
# Object-Oriented Programming
# AC03 ADS-EaD - Implementation of classes, inheritance, polymorphism and exception raising.
#
# Impacta email: bruno.rferreira@aluno.faculdadeimpacta.com.br
class Produto:
    """A product with a validated name and price.

    The name must be a non-empty string and the price a non-negative
    int or float; both rules are enforced through the property setters.
    """

    def __init__(self, nome, preco):
        # Assign through the properties so validation runs on construction.
        self.nome = nome
        self.preco = preco

    @property
    def nome(self):
        """Product name (non-empty str)."""
        return self.__nome

    @nome.setter
    def nome(self, novo_nome):
        # Reject non-strings first, then empty strings.
        if not isinstance(novo_nome, str):
            raise TypeError('nome deve ser do tipo str')
        if not novo_nome:
            raise ValueError('nome nao deve ser vazio')
        self.__nome = novo_nome

    @property
    def preco(self):
        """Product price (non-negative int or float)."""
        return self.__preco

    @preco.setter
    def preco(self, novo_preco):
        if not isinstance(novo_preco, (int, float)):
            raise TypeError('preco deve ser do tipo int ou float')
        if novo_preco < 0:
            raise ValueError('preco não deve ser negativo')
        self.__preco = novo_preco

    def calcular_preco_com_frete(self):
        """Final price including shipping; base products ship for free."""
        return self.__preco
class ProdutoFisico(Produto):
    """A physical product whose shipping cost depends on its weight."""

    # Shipping surcharge, in R$, for each kilogram of weight.
    PRECO_POR_KG = 5

    def __init__(self, nome, preco, peso):
        super().__init__(nome, preco)
        # Assigned through the property so the weight is validated.
        self.peso = peso

    @property
    def peso(self):
        """Weight in grams (positive int)."""
        return self.__peso

    @peso.setter
    def peso(self, novo_peso):
        if not isinstance(novo_peso, int):
            raise TypeError('peso deve ser do tipo int')
        if novo_peso <= 0:
            raise ValueError('peso deve ser positivo')
        self.__peso = novo_peso

    def peso_em_kg(self):
        """Weight converted from grams to kilograms (e.g. 7500 -> 7.5)."""
        return self.__peso / 1000.0

    def calcular_preco_com_frete(self):
        """Base price plus PRECO_POR_KG for each kilogram of weight.

        E.g. a R$100 product weighing 1000 g costs R$105 with shipping.
        """
        frete = self.peso_em_kg() * ProdutoFisico.PRECO_POR_KG
        return super().calcular_preco_com_frete() + frete
class ProdutoEletronico(ProdutoFisico):
    """A physical electronic product; shipping costs 1% more than a plain one."""

    # Multiplier applied to the physical product's shipping-inclusive price.
    ADICIONAL_FRETE = 1.01

    def __init__(self, nome, preco, peso, tensao, tempo_garantia):
        super().__init__(nome, preco, peso)
        # The voltage goes through its setter for validation; the warranty
        # period is stored as-is (no validation required).
        self.tensao = tensao
        self.__tempo_garantia = tempo_garantia

    @property
    def tensao(self):
        """Operating voltage: 0 (bivolt), 127 or 220."""
        return self.__tensao

    @tensao.setter
    def tensao(self, nova_tensao):
        if not isinstance(nova_tensao, int):
            raise TypeError('tensao deve ser do tipo int')
        if nova_tensao not in (0, 127, 220):
            raise ValueError('tensao deve ser 0, 127 ou 220')
        self.__tensao = nova_tensao

    @property
    def tempo_garantia(self):
        """Warranty period, exactly as provided at construction time."""
        return self.__tempo_garantia

    def calcular_preco_com_frete(self):
        """Physical-product price with shipping, plus a 1% electronics fee.

        E.g. a R$100 product weighing 1000 g costs R$106.05 with shipping.
        """
        return super().calcular_preco_com_frete() * ProdutoEletronico.ADICIONAL_FRETE
class Ebook(Produto):
    """
    Represents the basic elements of an ebook (digital book).

    Inherits name/price handling from Produto and adds a private author
    plus a validated private page count.
    """

    def __init__(self, nome, preco, autor, numero_paginas):
        """
        Initialize nome and preco through the Produto constructor; store the
        author directly and route numero_paginas through its property setter
        so that it is validated.
        """
        super().__init__(nome, preco)
        self.__autor = autor
        self.numero_paginas = numero_paginas

    @property
    def nome_exibicao(self):
        """
        Return a display string in the form "Nome (Autor)", e.g.
        "Aprendendo Python (Ana Maria)".
        """
        return "{} ({})".format(self.nome, self.__autor)

    @property
    def numero_paginas(self):
        """
        Return the value of the private attribute numero_paginas.
        """
        return self.__numero_paginas

    @numero_paginas.setter
    def numero_paginas(self, valor):
        """
        Validate and store the page count.

        Raises:
            ValueError: if valor is not greater than zero.
        """
        if valor <= 0:
            raise ValueError('numero_paginas deve ser positivo')
        self.__numero_paginas = valor
|
import ants
import antspynet
import tensorflow as tf
import numpy as np
from superiq import super_resolution_segmentation_per_label
from superiq import list_to_string
# Build a brain-extracted template in the ANTsX "biobank" space.
template = antspynet.get_antsxnet_data( "biobank" )
template = ants.image_read( template )
template = template * antspynet.brain_extraction( template )
# Subject T1 image (already brain-extracted per the filename).
# NOTE(review): hard-coded local path -- parameterize before reuse.
img = ants.image_read( "/Users/stnava/code/super_resolution_pipelines/data/ADNI-127_S_0112-20160205-T1w-001-brain_ext-bxtreg_n3.nii.gz" )
# The `locals()` guards below let the script be re-run interactively
# without recomputing the expensive registration / labeling steps.
if not 'rig' in locals():
    rig = ants.registration( template, img, "Affine" )
    rigi = rig['warpedmovout']
# Human-readable "<label-number>:<region-name>" legend for the DKT atlas
# labels produced below.
valid_labels = (
    "4:left lateral ventricle", "5:left inferior lateral ventricle",
    "6:left cerebellem exterior", "7:left cerebellum white matter",
    "10:left thalamus proper", "11:left caudate",
    "12:left putamen", "13:left pallidium",
    "15:4th ventricle", "16:brain stem",
    "17:left hippocampus", "18:left amygdala",
    "24:CSF", "25:left lesion", "26:left accumbens area", "28:left ventral DC",
    "30:left vessel",
    "43:right lateral ventricle", "44:right inferior lateral ventricle",
    "45:right cerebellum exterior", "46:right cerebellum white matter",
    "49:right thalamus proper", "50:right caudate",
    "51:right putamen", "52:right palladium",
    "53:right hippocampus",
    "54:right amygdala",
    "57:right lesion",
    "58:right accumbens area",
    "60:right ventral DC",
    "62:right vessel",
    "72:5th ventricle", "85:optic chasm",
    "91:left basal forebrain",
    "92:right basal forebrain",
    "630:cerebellar vermal lobules I-V",
    "631:cerebellar vermal lobules VI-VII",
    "632:cerebellar vermal lobules VIII-X",
    "1002:left caudal anterior cingulate",
    "1003:left caudal middle frontal",
    "1005:left cuneus",
    "1006:left entorhinal",
    "1007:left fusiform",
    "1008:left inferior parietal",
    "1009:left inferior temporal",
    "1010:left isthmus cingulate",
    "1011:left lateral occipital",
    "1012:left lateral orbitofrontal",
    "1013:left lingual",
    "1014:left medial orbitofrontal",
    "1015:left middle temporal",
    "1016:left parahippocampal",
    "1017:left paracentral",
    "1018:left pars opercularis",
    "1019:left pars orbitalis",
    "1020:left pars triangularis",
    "1021:left pericalcarine",
    "1022:left postcentral",
    "1023:left posterior cingulate",
    "1024:left precentral",
    "1025:left precuneus",
    "1026:left rostral anterior cingulate",
    "1027:left rostral middle frontal",
    "1028:left superior frontal",
    "1029:left superior parietal",
    "1030:left superior temporal",
    "1031:left supramarginal",
    "1034:left transverse temporal",
    "1035:left insula",
    "2002:right caudal anterior cingulate",
    "2003:right caudal middle frontal",
    "2005:right cuneus",
    "2006:right entorhinal",
    "2007:right fusiform",
    "2008:right inferior parietal",
    "2009:right inferior temporal",
    "2010:right isthmus cingulate",
    "2011:right lateral occipital",
    "2012:right lateral orbitofrontal",
    "2013:right lingual",
    "2014:right medial orbitofrontal",
    "2015:right middle temporal",
    "2016:right parahippocampal",
    "2017:right paracentral",
    "2018:right pars opercularis",
    "2019:right pars orbitalis",
    "2020:right pars triangularis",
    "2021:right pericalcarine",
    "2022:right postcentral",
    "2023:right posterior cingulate",
    "2024:right precentral",
    "2025:right precuneus",
    "2026:right rostral anterior cingulate",
    "2027:right rostral middle frontal",
    "2028:right superior frontal",
    "2029:right superior parietal",
    "2030:right superior temporal",
    "2031:right supramarginal",
    "2034:right transverse temporal",
    "2035:right insula" )
# see help for meaning of labels
# DKT labeling is done in template (rigid/affine) space, preprocessing off
# because the image was aligned above.
if not 'dkt' in locals():
    dkt = antspynet.desikan_killiany_tourville_labeling( rigi,
        do_preprocessing=False,
        return_probability_images=True ) # FIXME - use probability images later
# Map the segmentation back to native space with the inverse of the affine,
# using a label-preserving interpolator.
if not 'segorigspace' in locals():
    segorigspace = ants.apply_transforms( img, dkt['segmentation_image'],
        rig['fwdtransforms'], whichtoinvert=[True], interpolator='genericLabel')
# OUTPUT: write the native resolution image => segorigspace
# and its label geometry csv
# NOTE(review): output_filename already ends in "_", so the names below get
# a double underscore ("deep_dkt__OR_seg.nii.gz") -- confirm intended.
output_filename = "/tmp/deep_dkt/deep_dkt_" # + list_to_string(mysegnumbers)
output_filename_native = output_filename + "_OR_seg.nii.gz"
output_filename_native_csv = output_filename + "_OR_seg.csv"
ants.image_write( segorigspace, output_filename_native )
# NOTE: the code below is SR specific and should only be run if that is requested
########################################
mysegnumbers = [ 1006, 1007, 1015, 1016] # Eisai cortical regions, left
# NOTE(review): the line below immediately overwrites the left-hemisphere
# selection above, so only the right-hemisphere labels are processed --
# confirm whether both sets were meant to be combined into one list.
mysegnumbers = [ 2006, 2007, 2015, 2016] # Eisai cortical regions, right
# FIXME - check that mysegnumbers are in valid_labels
########################################
# find the right probability image
mdl = tf.keras.models.load_model( "models/SEGSR_32_ANINN222_3.h5" ) # FIXME - parameterize this
# Per-label 2x super-resolution of the image and segmentation.
srseg = super_resolution_segmentation_per_label(
    imgIn = img,
    segmentation = segorigspace,
    upFactor = [2,2,2],
    sr_model = mdl,
    segmentation_numbers = mysegnumbers,
    dilation_amount = 6,
    verbose = True
)
# writing ....
output_filename_sr = output_filename + list_to_string(mysegnumbers) + "_SR.nii.gz"
ants.image_write( srseg['super_resolution'], output_filename_sr )
output_filename_sr_seg = output_filename +list_to_string(mysegnumbers) + "_SR_seg.nii.gz"
ants.image_write(srseg['super_resolution_segmentation'], output_filename_sr_seg )
output_filename_sr_seg_csv = output_filename + list_to_string(mysegnumbers) + "_SR_seg.csv"
# FIXME: write csv here
|
#!/usr/bin/python
"""
This code will 'score' input against a 'trained'
frequency distribution.
"""
import random
#Trainable model
# 256-bin byte-frequency histograms: model_raw accumulates raw counts
# (updateModel), model_norm holds the percentage-normalized version
# produced by finalizeModel().
model_norm = [0.0] * 256
model_raw = [0.0] * 256
# True only after finalizeModel() has normalized the current counts.
model_normalized = False
def resetModel():
    """Reset the trainable model to an all-zero, un-normalized state."""
    global model_norm, model_raw, model_normalized
    model_norm = [0.0 for _ in range(256)]
    model_raw = [0.0 for _ in range(256)]
    model_normalized = False
def meanSquareError(normHist):
    """Return the sum of squared differences between *normHist* and the
    trained, normalized model histogram.

    normHist must be a 256-entry sequence of normalized byte frequencies
    (same scale as model_norm, i.e. percentages).
    """
    global model_norm
    totalError = 0.0
    # BUG FIX: was range(255), which silently ignored byte value 255.
    for i in range(256):
        diff = normHist[i] - model_norm[i]
        totalError += diff * diff  # square the error
    return totalError
def finalizeModel():
    """Normalize the accumulated raw counts into model_norm as percentages
    and mark the model as ready for scoring.

    A zero/empty model divides by 1.0 instead of crashing, leaving
    model_norm all zeros.
    """
    global model_raw
    global model_norm
    global model_normalized
    # BUG FIX: both loops previously used range(255), ignoring byte 255 in
    # the total and never normalizing its bin.
    total = sum(model_raw)
    if total <= 0.0:
        total = 1.0
    for i in range(256):
        model_norm[i] = model_raw[i] / total * 100
    model_normalized = True
def updateModel(rawbytes):
    """Accumulate the byte-frequency counts of *rawbytes* (a str of
    single-character "bytes", Python-2 style) into the raw model.

    Marks the model as no longer normalized; call finalizeModel() before
    scoring again.
    """
    global model_raw
    global model_normalized
    model_normalized = False
    for ch in rawbytes:
        model_raw[ord(ch)] += 1.0
def printModel():
    # Debug helper: print the normalized model as a bracketed list.
    # Python 2 only (print-statement syntax); the final line emits two
    # backspace characters to erase the trailing ", " before "]".
    # NOTE(review): iterates range(255), so bin 255 is never printed --
    # consistent with the off-by-one elsewhere in this file.
    global model_normalized
    global model_norm
    if False == model_normalized:
        print "Error: Model not yet finalized."
        exit()
    print '[',
    for i in range(255):
        print model_norm[i],
        print ',',
    print '\b\b]'
def scoreInput(charHist):
    """Normalize *charHist* (256 raw byte counts) and return its mean
    square error against the trained model.

    Prints an error and exits the process if finalizeModel() has not been
    called since the last model update.
    """
    if model_normalized == False:
        # Parenthesized print keeps this line valid on both Python 2 and 3.
        print("Error: Training model not yet setup properly.")
        exit()
    normalCharHist = [0.0] * 256
    total = 0.0
    # BUG FIX: both loops previously used range(255), ignoring byte 255.
    for i in range(256):
        total += charHist[i]
    if total <= 0.0:
        total = 1.0  # avoid division by zero for an empty histogram
    for i in range(256):
        normalCharHist[i] = charHist[i] / total * 100.0
    return meanSquareError(normalCharHist)
#Testing below
# Python 2 script body (print-statement syntax). Trains the model first on
# English text, then on a JPEG, and scores text / random / JPEG inputs
# against each model: a lower mean-square error means "more similar".
# NOTE(review): binary files are opened with mode "r" -- this only reads
# raw bytes correctly on Python 2 (and on Unix); Python 3 would need "rb".
if __name__ == "__main__":
    f = open("tests/training_text.txt", "r")
    example1 = f.read()
    f.close()
    #Build up model for text
    resetModel()
    updateModel(example1)
    finalizeModel()
    #printModel()
    print "Testing against Text."
    #Scorable input
    charHist = [0.0] * 256
    f = open("tests/trial_text.txt", "r")
    trial = f.read()
    f.close()
    # Histogram the trial text, then score it against the text model.
    for byte in trial:
        byte = ord(byte)
        charHist[byte] += 1.0
    print "Text: " + str(scoreInput(charHist))
    # Uniform-random bytes should score poorly against a text model.
    charHist = [0.0] * 256
    for i in range(20000):
        charHist[random.randrange(0,255)] += 1.0
    print "Random: " + str(scoreInput(charHist))
    charHist = [0.0] * 256
    f = open("tests/wikipedia_Sunset_2007-1.jpg", "r")
    trial = f.read()
    f.close()
    for byte in trial:
        byte = ord(byte)
        charHist[byte] += 1.0
    print "JPEG: " + str(scoreInput(charHist))
    f = open("tests/wikipedia_Ash_Tree.jpg", "r")
    example1 = f.read()
    f.close()
    #Build up model for jpeg
    resetModel()
    updateModel(example1)
    finalizeModel()
    #printModel()
    print "Testing against JPEG."
    #Scorable input
    charHist = [0.0] * 256
    f = open("tests/trial_text.txt", "r")
    trial = f.read()
    f.close()
    for byte in trial:
        byte = ord(byte)
        charHist[byte] += 1.0
    print "Text: " + str(scoreInput(charHist))
    charHist = [0.0] * 256
    for i in range(20000):
        charHist[random.randrange(0,255)] += 1.0
    print "Random: " + str(scoreInput(charHist))
    charHist = [0.0] * 256
    f = open("tests/wikipedia_Sunset_2007-1.jpg", "r")
    trial = f.read()
    f.close()
    for byte in trial:
        byte = ord(byte)
        charHist[byte] += 1.0
    print "JPEG: " + str(scoreInput(charHist))
|
from ecellManager import * |
import os
import shutil
from conans import ConanFile, CMake, tools
from future.moves import sys
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
class CAresConan(ConanFile):
    """Conan recipe for the c-ares asynchronous DNS library (Linux/x86_64)."""
    # Shared gcc version metadata lives one directory up (see the sys.path
    # tweak at module import time); it is exported with the recipe below.
    from gcc import GCC_VERSIONS, CURRENT_GCC_VERSION
    # The recipe version is read from VERSION.txt shipped next to this file.
    with open(os.path.join(os.path.dirname(os.path.realpath(
            __file__)), "VERSION.txt"), 'r') as version_file:
        version = version_file.read()
    requires = "gcc/{}@kapilsh/release".format(CURRENT_GCC_VERSION)
    name = "cares"
    license = "MIT"
    url = "https://github.com/conan-community/conan-cares"
    description = "A C library for asynchronous DNS requests"
    homepage = "https://cares.haxx.se/"
    settings = {"os": ["Linux"],
                "compiler": {"gcc": {"version": GCC_VERSIONS}},
                "arch": ["x86_64"],
                "build_type": ["Debug", "Release"]}
    options = {"shared": [True, False], "fPIC": [True, False]}
    default_options = "shared=False", "fPIC=True"
    exports = "VERSION.txt", "../gcc.py"
    generators = "cmake"
    source_url = "https://github.com/c-ares/c-ares/archive"

    def source(self):
        # Download and unpack the pinned c-ares release into ./cares.
        ver = self.version.replace(".", "_")
        tar_file = f"cares-{ver}.tar.gz"
        tools.download("{}/{}".format(self.source_url, tar_file), tar_file)
        tools.unzip(tar_file)
        os.unlink(tar_file)
        self.output.info(os.listdir("."))
        shutil.move(f"c-ares-cares-{ver}", "cares")

    def cmake_configure(self):
        # Configure CMake: static/shared per the `shared` option; tests off.
        cmake = CMake(self)
        cmake.definitions["CMAKE_BUILD_TYPE"] = self.settings.build_type
        cmake.definitions["CARES_STATIC"] = not self.options.shared
        cmake.definitions["CARES_SHARED"] = self.options.shared
        cmake.definitions["CARES_BUILD_TESTS"] = "OFF"
        cmake.configure(source_folder="cares")
        return cmake

    def build(self):
        cmake = self.cmake_configure()
        cmake.build()
        # Install straight into the package folder; package() is a no-op.
        cmake.install()

    def package(self):
        # Nothing to copy: cmake.install() in build() already populated the
        # package folder.
        return

    def package_info(self):
        # Expose libs, include dirs and runtime env vars to consumers.
        self.cpp_info.libs = tools.collect_libs(self)
        # NOTE(review): CARES_STATICLIB is defined unconditionally, even for
        # shared builds -- confirm against c-ares' ares.h import/export logic.
        self.cpp_info.defines.append("CARES_STATICLIB")
        self.env_info.LD_LIBRARY_PATH.append("{}/lib".format(
            self.package_folder))
        self.env_info.PATH.append(os.path.join(self.package_folder, "bin"))
        self.cpp_info.includedirs = ['include']
        self.env_info.CPATH.append("{}/include".format(self.package_folder))
|
from mamba import description, before, context, it
from doublex import Spy
from expects import expect, be_true, be_false
from doublex_expects import have_been_called, have_been_called_with
from mamba import reporter, runnable
from mamba.example import Example
from mamba.example_group import ExampleGroup
from spec.object_mother import an_example_group
# Tag used to select which examples/groups run in the specs below.
TAG = 'any_tag'
TAGS = [TAG]

# mamba BDD spec: verifies that examples and example groups are executed
# (or skipped) according to the tags passed to execute(), and that tag
# lookups fall back to the parent group.
with description('Example execution using tags') as self:
    with before.each:
        self.reporter = Spy(reporter.Reporter)
        self.example_group = an_example_group()
        self.example_with_tags = Example(lambda x: x,
                                         parent=self.example_group,
                                         tags=TAGS)
        self.other_example = Example(lambda x: x, parent=self.example_group)

    with context('when tag is included in example tags'):
        with it('executes example'):
            self.example_with_tags.execute(self.reporter,
                                           runnable.ExecutionContext(),
                                           tags=TAGS)
            expect(self.example_with_tags.was_run).to(be_true)

    with context('when tag is not included in example tags'):
        with it('does not execute example'):
            self.other_example.execute(self.reporter,
                                       runnable.ExecutionContext(),
                                       tags=TAGS)
            expect(self.other_example.was_run).to(be_false)

    # ("silbing" in the spec strings below is a typo for "sibling"; the
    # strings are the spec's identity, so they are left untouched.)
    with context('when example group does not have tags and silbing one does'):
        with it('skips example group without tags'):
            self.parent = ExampleGroup('any example_group')
            self.child = ExampleGroup('child example_group', tags=TAGS)
            self.example = Example(lambda x: x)
            self.child.append(self.example)
            self.silbing = ExampleGroup('silbing example_group')
            self.silbing.append(Example(lambda x: x))
            self.parent.append(self.child)
            self.parent.append(self.silbing)
            self.parent.execute(self.reporter,
                                runnable.ExecutionContext(),
                                tags=TAGS)
            # Only the parent and the tagged child start; the untagged
            # sibling group is skipped.
            expect(self.reporter.example_group_started).\
                to(have_been_called.twice)
            expect(self.reporter.example_group_started).\
                to(have_been_called_with(self.parent))
            expect(self.reporter.example_group_started).\
                to(have_been_called_with(self.child))

    with context('when checking if an example has a tag'):
        with context('and example has the tag'):
            with it('returns true'):
                example = Example(lambda x: x, tags=TAGS)
                expect(example.has_tag(TAG)).to(be_true)

        with context('and does not contains the tag'):
            with it('returns false'):
                example = Example(lambda x: x)
                expect(example.has_tag(TAG)).to(be_false)

        with context('and parent example has the tag'):
            with it('returns true'):
                parent = ExampleGroup('any example_group', tags=TAGS)
                example = Example(lambda x: x)
                parent.append(example)
                expect(example.has_tag(TAG)).to(be_true)

        with context('and parent example has not the tag'):
            with it('returns false'):
                parent = ExampleGroup('any example_group')
                example = Example(lambda x: x)
                parent.append(example)
                expect(example.has_tag(TAG)).to(be_false)

    with context('when checking if is included in execution with tags'):
        with context('and has the tag'):
            with it('returns true'):
                parent = ExampleGroup('any example_group', tags=TAGS)
                expect(parent.included_in_execution(TAGS)).to(be_true)

        with context('and does not has the tag'):
            with it('returns false'):
                parent = ExampleGroup('any example_group')
                expect(parent.included_in_execution(TAGS)).to(be_false)

        with context('and children has the tag'):
            with it('return true'):
                parent = ExampleGroup('any example_group')
                example = Example(lambda x: x, tags=TAGS)
                parent.append(example)
                expect(parent.included_in_execution(TAGS)).to(be_true)
|
from pytest_bdd import given, when, then
from model.group import Group
@given('a group list', target_fixture="group_list")
# All given-steps are treated as fixtures, so they can be passed as
# parameters into other steps.
def group_list(db):
    """Return the current list of groups from the database fixture."""
    return db.get_group_list()
@given('a new group with <name>, <header> and <footer>', target_fixture="new_group")
def new_group(name, header, footer):
    """Build a Group from the scenario-outline example parameters."""
    return Group(name=name, header=header, footer=footer)
@when('I add the group to the list')
def add_new_group(app, new_group):
    """Create the new group through the application fixture (UI layer)."""
    app.group.create(new_group)
@then('the new group list is equal to the old list with the added group')
# group_list is injected as a parameter (given-steps are fixtures).
def verify_group_added(db, group_list, new_group):
    """Check the DB group list equals the old list plus the new group,
    comparing order-independently via the id_or_max sort key."""
    old_groups = group_list
    new_groups = db.get_group_list()
    old_groups.append(new_group)
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
from django.urls import path
from .views import VisualizerView
# Route "<voting_id>/" to the visualizer page for that voting.
urlpatterns = [
    path('<int:voting_id>/', VisualizerView.as_view()),
]
|
# Generated by Django 2.2.1 on 2019-07-01 17:47
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration altering Prescription.timestamp."""

    dependencies = [
        ('prescription', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='prescription',
            name='timestamp',
            # NOTE(review): verbose_name is set to a datetime instance --
            # this looks like makemigrations' auto_now_add prompt answer
            # leaking into the wrong kwarg (a `default=` was probably
            # intended). Left as-is because editing an applied migration
            # changes migration state; confirm against the model definition.
            field=models.DateTimeField(verbose_name=datetime.datetime(2019, 7, 1, 17, 47, 20, 424861, tzinfo=utc)),
        ),
    ]
|
import datetime
import re
import dateutil.rrule as dr
import dateutil.parser as dp
import dateutil.relativedelta as drel
import OrgExtended.orgduration as orgduration
import calendar
def total_seconds(td):
    """Return the total number of seconds in timedelta *td* as a float.

    Kept for backward compatibility; delegates to the standard
    :meth:`datetime.timedelta.total_seconds`, which computes the same
    microsecond-accurate value as the previous hand-rolled formula.
    """
    return td.total_seconds()
def total_minutes(td):
    """Return the length of timedelta *td* in minutes, as a float."""
    return td.total_seconds() / 60
def gene_timestamp_regex(brtype, prefix=None, nocookie=False):
    """
    Generate timestamp regex for active/inactive/nobrace brace type

    :type brtype: {'active', 'inactive', 'nobrace'}
    :arg brtype:
        It specifies a type of brace.
        active: <>-type; inactive: []-type; nobrace: no braces.

    :type prefix: str or None
    :arg prefix:
        It will be appended to the head of keys of the "groupdict".
        For example, if prefix is ``'active_'`` the groupdict has
        keys such as ``'active_year'``, ``'active_month'``, and so on.
        If it is None it will be set to ``brtype`` + ``'_'``.

    :type nocookie: bool
    :arg nocookie:
        Cookie part (e.g., ``'-3d'`` or ``'+6m'``) is not included if
        it is ``True``. Default value is ``False``.

    >>> timestamp_re = re.compile(
    ...     gene_timestamp_regex('active', prefix=''),
    ...     re.VERBOSE)
    >>> timestamp_re.match('no match')  # returns None
    >>> m = timestamp_re.match('<2010-06-21 Mon>')
    >>> m.group()
    '<2010-06-21 Mon>'
    >>> '{year}-{month}-{day}'.format(**m.groupdict())
    '2010-06-21'
    >>> m = timestamp_re.match('<2005-10-01 Sat 12:30 +7m -3d>')
    >>> sorted(m.groupdict().items())
    ... # doctest: +NORMALIZE_WHITESPACE
    [('day', '01'),
     ('end_hour', None), ('end_min', None),
     ('hour', '12'), ('min', '30'),
     ('month', '10'),
     ('repeatdwmy', 'm'), ('repeatnum', '7'), ('repeatpre', '+'),
     ('warndwmy', 'd'), ('warnnum', '3'), ('warnpre', '-'), ('year', '2005')]

    When ``brtype = 'nobrace'``, cookie part cannot be retrieved.

    >>> timestamp_re = re.compile(
    ...     gene_timestamp_regex('nobrace', prefix=''),
    ...     re.VERBOSE)
    >>> timestamp_re.match('no match')  # returns None
    >>> m = timestamp_re.match('2010-06-21 Mon')
    >>> m.group()
    '2010-06-21'
    """
    if brtype == 'active':
        (bo, bc) = ('<', '>')
    elif brtype == 'inactive':
        (bo, bc) = (r'\[', r'\]')
    elif brtype == 'nobrace':
        (bo, bc) = ('', '')
    else:
        raise ValueError("brtype='{0!r}' is invalid".format(brtype))

    # Characters that may appear between fields: anything but the closing
    # brace (or only whitespace/word chars when there is no brace to stop at).
    if brtype == 'nobrace':
        ignore = r'[\s\w]'
    else:
        ignore = '[^{bc}]'.format(bc=bc)

    if prefix is None:
        prefix = '{0}_'.format(brtype)

    regex_date_time = r"""
        (?P<{prefix}year>\d{{4}}) -
        (?P<{prefix}month>\d{{2}}) -
        (?P<{prefix}day>\d{{2}})
        (  # optional time field
           ({ignore}+?)
           (?P<{prefix}hour>\d{{2}}) :
           (?P<{prefix}min>\d{{2}})
           (  # optional end time range
               --?
               (?P<{prefix}end_hour>\d{{2}}) :
               (?P<{prefix}end_min>\d{{2}})
           )?
        )?
        """
    regex_cookie = r"""
        (  # optional repeater
           ({ignore}+?)
           (?P<{prefix}repeatpre>  [\.\+]{{1,2}})
           (?P<{prefix}repeatnum>  \d+)
           (?P<{prefix}repeatdwmy> [dwmy])
        )?
        (  # optional warning
           ({ignore}+?)
           (?P<{prefix}warnpre>  \-)
           (?P<{prefix}warnnum>  \d+)
           (?P<{prefix}warndwmy> [dwmy])
        )?
        """
    # BUG FIX: the condition used to be `nocookie or brtype != 'nobrace'`,
    # which *included* the cookie part when nocookie=True -- the opposite of
    # the documented behaviour. Cookies belong in the pattern only when the
    # caller did not opt out and a brace type is present. The default
    # (nocookie=False) behaviour is unchanged.
    regex = ''.join([
        bo,
        regex_date_time,
        regex_cookie if (not nocookie and brtype != 'nobrace') else '',
        '({ignore}*?)',
        bc])
    return regex.format(prefix=prefix, ignore=ignore)
# Bare timestamp ("2010-06-21 Mon ...") with no surrounding braces;
# group names are unprefixed (year, month, day, ...).
TIMESTAMP_NOBRACE_RE = re.compile(
    gene_timestamp_regex('nobrace', prefix=''),
    re.VERBOSE)
# Either an active <...> or an inactive [...] timestamp; group names are
# prefixed 'active_' / 'inactive_' respectively, so both alternatives can
# coexist in one pattern.
TIMESTAMP_RE = re.compile(
    '|'.join((gene_timestamp_regex('active'),
              gene_timestamp_regex('inactive'))),
    re.VERBOSE)
def copy_repeat_info(src, dst):
    """Copy the repeat-rule fields from *src* onto *dst*.

    No-op when *src* is None or has no (truthy) repeat_rule attribute.
    """
    if not src:
        return
    if getattr(src, 'repeat_rule', None):
        dst.repeatpre = src.repeatpre
        dst.repeatdwmy = src.repeatdwmy
        dst.repeat_rule = src.repeat_rule
        dst.freq = src.freq
def get_repeat_info(rv, mdict):
    """Populate repeat/warning fields on *rv* from a timestamp regex groupdict.

    Scans the 'active_' and 'inactive_' prefixed cookie groups of *mdict*.
    A repeat cookie ('+7m' etc.) sets rv.freq, rv.repeatnum, rv.repeat_rule,
    rv.repeatpre and rv.repeatdwmy (rv.start must already be set -- it seeds
    the rrule). A warning cookie ('-3d' etc.) sets rv.wfreq, rv.warnnum,
    rv.warn_rule, rv.warnpre and rv.warndwmy.
    """
    for prefix in ['active_', 'inactive_']:
        if (prefix + 'repeatpre') in mdict:
            repeatpre = mdict[prefix + 'repeatpre']
            repeatnum = mdict[prefix + 'repeatnum']
            repeatdwmy = mdict[prefix + 'repeatdwmy']
            if repeatdwmy is not None and repeatpre is not None:
                freq_map = {'y': dr.YEARLY, 'm': dr.MONTHLY, 'w': dr.WEEKLY}
                rv.freq = freq_map.get(repeatdwmy, dr.DAILY)
                rv.repeatnum = max(1, int(repeatnum))
                # Build an org mode repeat rule.
                rv.repeat_rule = dr.rrule(rv.freq, interval=rv.repeatnum,
                                          dtstart=rv.start, cache=True)
                # repeatpre records what to do when the task is marked done:
                #  +  bump to the next FIXED interval (even if in the past)
                #  ++ bump to the next FIXED interval in the future,
                #     even if intervals were missed
                #  .+ bump, but restart the interval from today
                rv.repeatpre = repeatpre
                rv.repeatdwmy = repeatdwmy
        if (prefix + 'warnpre') in mdict:
            warnpre = mdict[prefix + 'warnpre']
            warnnum = mdict[prefix + 'warnnum']
            warndwmy = mdict[prefix + 'warndwmy']
            if warndwmy is not None and warnpre is not None:
                rv.warnnum = max(1, int(warnnum))
                # BUG FIX: datetime.timedelta() has no years=/months= keyword
                # arguments, so the original raised TypeError for 'y'/'m'
                # warning cookies. Approximate with 365/30-day spans, which
                # is adequate for a warning lead time.
                if warndwmy == 'y':
                    rv.wfreq = dr.YEARLY
                    rv.warn_rule = datetime.timedelta(days=365 * rv.warnnum)
                elif warndwmy == 'm':
                    rv.wfreq = dr.MONTHLY
                    rv.warn_rule = datetime.timedelta(days=30 * rv.warnnum)
                elif warndwmy == 'w':
                    rv.wfreq = dr.WEEKLY
                    rv.warn_rule = datetime.timedelta(weeks=rv.warnnum)
                else:
                    rv.wfreq = dr.DAILY
                    rv.warn_rule = datetime.timedelta(days=rv.warnnum)
                rv.warnpre = warnpre
                rv.warndwmy = warndwmy
class OrgDate(object):
_active_default = True
"""
The default active value.
When the `active` argument to ``__init__`` is ``None``,
This value will be used.
"""
def __init__(self, start, end=None, active=None, repeat_rule=None, warn_rule=None):
"""
Create :class:`OrgDate` object
:type start: datetime, date, tuple, int, float or None
:type end: datetime, date, tuple, int, float or None
:arg start: Starting date.
:arg end: Ending date.
:type active: bool or None
:arg active: Active/inactive flag.
None means using its default value, which
may be different for different subclasses.
>>> OrgDate(datetime.date(2012, 2, 10))
OrgDate((2012, 2, 10))
>>> OrgDate((2012, 2, 10))
OrgDate((2012, 2, 10))
>>> OrgDate((2012, 2)) #doctest: +NORMALIZE_WHITESPACE
Traceback (most recent call last):
...
ValueError: Automatic conversion to the datetime object
requires at least 3 elements in the tuple.
Only 2 elements are in the given tuple '(2012, 2)'.
>>> OrgDate((2012, 2, 10, 12, 20, 30))
OrgDate((2012, 2, 10, 12, 20, 30))
>>> OrgDate((2012, 2, 10), (2012, 2, 15), active=False)
OrgDate((2012, 2, 10), (2012, 2, 15), False)
OrgDate can be created using unix timestamp:
>>> OrgDate(datetime.datetime.fromtimestamp(0)) == OrgDate(0)
True
"""
self._start = self._to_date(start)
self._end = self._to_date(end)
self._active = self._active_default if active is None else active
self.repeat_rule = repeat_rule
self.warn_rule = warn_rule
def __add__(self, o):
    """Return a new OrgDate shifted forward by *o*.

    *o* may be an int (interpreted via OrgDuration.ParseInt) or an
    OrgDuration; any other type returns self unchanged.
    """
    if isinstance(o, int):
        delta = orgduration.OrgDuration.ParseInt(o).timedelta()
    elif isinstance(o, orgduration.OrgDuration):
        delta = o.timedelta()
    else:
        return self
    shifted_end = self._end + delta if self._end else None
    return OrgDate(self._start + delta, shifted_end, self._active,
                   self.repeat_rule, self.warn_rule)
def __sub__(self, o):
    """Return a new OrgDate shifted backward by *o*.

    *o* may be an int (interpreted via OrgDuration.ParseInt) or an
    OrgDuration; any other type returns self unchanged.
    """
    if isinstance(o, int):
        delta = orgduration.OrgDuration.ParseInt(o).timedelta()
    elif isinstance(o, orgduration.OrgDuration):
        delta = o.timedelta()
    else:
        return self
    shifted_end = self._end - delta if self._end else None
    return OrgDate(self._start - delta, shifted_end, self._active,
                   self.repeat_rule, self.warn_rule)
@staticmethod
def format_date(now, active):
if(active):
return now.strftime("<%Y-%m-%d %a>")
else:
return now.strftime("[%Y-%m-%d %a]")
@staticmethod
def format_clock(now, active):
if(active):
return now.strftime("<%Y-%m-%d %a %H:%M>")
else:
return now.strftime("[%Y-%m-%d %a %H:%M]")
@staticmethod
def format_clock_with_time_range(s, e, active):
if(active):
return s.strftime("<%Y-%m-%d %a %H:%M-") + e.strftime("%H:%M>")
else:
return s.strftime("[%Y-%m-%d %a %H:%M-") + e.strftime("%H:%M>")
@staticmethod
def format_datetime(now):
if(isinstance(now,datetime.datetime)):
return now.strftime("%Y-%m-%d %a %H:%M")
else:
return now.strftime("%Y-%m-%d %a")
@staticmethod
def format_duration(d):
hours = d.seconds/3600
minutes = (d.seconds/60)%60
return "{0:02d}:{1:02d}".format(int(hours),int(minutes))
@staticmethod
def format_dwim(start, end=None,active=False):
if(end):
if(isinstance(start, datetime.datetime)):
if(end.date() != start.date()):
duration = end - start
return "{0}--{1} => {2}".format(
OrgDate.format_clock(start, active),
OrgDate.format_clock(end, active),
OrgDate.format_duration(duration))
else:
return OrgDate.format_clock_with_time_range(start,end,active)
else:
if(end == start):
return OrgDate.format_date(start,active)
else:
duration = end - start
return "{0}--{1} => {2}".format(
OrgDate.format_date(start, active),
OrgDate.format_date(end, active),
OrgDate.format_duration(duration))
else:
if(isinstance(start, datetime.datetime)):
return OrgDate.format_clock(start, active)
else:
return OrgDate.format_date(start,active)
@staticmethod
def format_as_clock(start, end=None,active=False):
if(end):
duration = end - start
return "{0}--{1} => {2}".format(
OrgDate.format_clock(start, active),
OrgDate.format_clock(end, active),
OrgDate.format_duration(duration))
else:
return "{0}--".format(
OrgDate.format_clock(start, active))
def format_clock_str(self):
    """Render this date's start/end as an inactive CLOCK line."""
    return OrgDate.format_as_clock(self._start, self._end)
def format_datetime_str(self):
    """Render this date's start without braces (time included if present)."""
    return OrgDate.format_datetime(self._start)
@staticmethod
def _to_date(date):
if isinstance(date, (tuple, list)):
if len(date) == 3:
return datetime.date(*date)
elif len(date) > 3:
return datetime.datetime(*date)
else:
raise ValueError(
"Automatic conversion to the datetime object "
"requires at least 3 elements in the tuple. "
"Only {0} elements are in the given tuple '{1}'."
.format(len(date), date))
elif isinstance(date, (int, float)):
return datetime.datetime.fromtimestamp(date)
else:
return date
@staticmethod
def _date_to_tuple(date):
if isinstance(date, datetime.datetime):
return tuple(date.timetuple()[:6])
elif isinstance(date, datetime.date):
return tuple(date.timetuple()[:3])
def __str__(self):
    """Org-formatted rendering of this date (range) via format_dwim."""
    # TODO: Handle recurrence in this!
    return self.format_dwim(self._start, self._end, self._active)
def __repr__(self):
args = [
self.__class__.__name__,
self._date_to_tuple(self.start),
self._date_to_tuple(self.end) if self.has_end() else None,
None if self._active is self._active_default else self._active,
]
if args[2] is None and args[3] is None:
return '{0}({1!r})'.format(*args)
elif args[3] is None:
return '{0}({1!r}, {2!r})'.format(*args)
else:
return '{0}({1!r}, {2!r}, {3!r})'.format(*args)
def __nonzero__(self):
    """Truthiness tracks whether a start date is set (Python 2 hook)."""
    return bool(self._start)
__bool__ = __nonzero__  # PY3
def __eq__(self, other):
if (isinstance(other, OrgDate) and
self._start is None and
other._start is None):
return True
return (isinstance(other, self.__class__) and
self._start == other._start and
self._end == other._end and
self._active == other._active)
@property
def repeating(self):
return self.repeat_rule != None
@property
def warning(self):
return self.warn_rule != None
@property
def next_repeat_from_now(self):
now = datetime.datetime.now()
return self.repeat_rule.after(now,inc=True)
@property
def next_repeat_from_today(self):
# NOTE: This will be on midnight if the schedule doesn't have a time
now = datetime.datetime.now()
now = now.replace(hour=0,minute=0,second=0,microsecond=0)
return self.repeat_rule.after(now,inc=True)
def next_repeat_from(self,now):
#now = now.replace(hour=0,minute=0,second=0,microsecond=0)
return self.repeat_rule.after(now,inc=False)
@property
def deadline_start(self):
    """Start of the warning window: start minus warn_rule (default 1 day)."""
    # NOTE(review): this getter mutates state -- it installs a default
    # one-day warn_rule when none is set. Callers may rely on that side
    # effect, so it is preserved; confirm before refactoring.
    if(not self.warning):
        self.warn_rule = datetime.timedelta(days=1)
    return self.start - self.warn_rule
@property
def start(self):
    """
    Get the starting date or datetime object.

    >>> OrgDate((2012, 2, 10)).start
    datetime.date(2012, 2, 10)
    >>> OrgDate((2012, 2, 10, 12, 10)).start
    datetime.datetime(2012, 2, 10, 12, 10)
    """
    return self._start
@property
def end(self):
    """
    Get the ending date or datetime object (None when there is no end).

    >>> OrgDate((2012, 2, 10), (2012, 2, 15)).end
    datetime.date(2012, 2, 15)
    >>> OrgDate((2012, 2, 10, 12, 10), (2012, 2, 15, 12, 10)).end
    datetime.datetime(2012, 2, 15, 12, 10)
    """
    return self._end
def is_active(self):
    """Return true if the date is active (rendered with <> braces)."""
    return self._active
def has_end(self):
    """Return true if it has an end date (i.e. this is a range)."""
    return bool(self._end)
def has_time(self):
    """
    Return true if the start date has a time field.

    >>> OrgDate((2012, 2, 10)).has_time()
    False
    >>> OrgDate((2012, 2, 10, 12, 10)).has_time()
    True
    """
    return isinstance(self._start, datetime.datetime)
def has_overlap(self, other):
"""
Test if it has overlap with other :class:`OrgDate` instance
If the argument is not an instance of :class:`OrgDate`, it is
converted to :class:`OrgDate` instance by ``OrgDate(other)``
first.
>>> od = OrgDate((2012, 2, 10), (2012, 2, 15))
>>> od.has_overlap(OrgDate((2012, 2, 11)))
True
>>> od.has_overlap(OrgDate((2012, 2, 20)))
False
>>> od.has_overlap(OrgDate((2012, 2, 11), (2012, 2, 20)))
True
>>> od.has_overlap((2012, 2, 11))
True
"""
if not isinstance(other, OrgDate):
other = OrgDate(other)
if self.has_end():
return (self._datetime_in_range(other.start) or
self._datetime_in_range(other.end))
elif other.has_end():
return other._datetime_in_range(self.start)
else:
# These could be datetime entries
# do we care about the hours, probably not!
# this is containement and we are just a point
# if these are on the same day we are okay.
ss = self.start
os = other.start
if(not type(ss) == datetime.date):
ss = ss.date()
if(not type(os) == datetime.date):
os = os.date()
return ss == os
def after(self, date):
if not isinstance(date, (datetime.datetime, datetime.date)):
return False
asdt = self._as_datetime
if asdt(self.start) <= asdt(date):
return True
return False
def before(self, date):
if not isinstance(date, (datetime.datetime, datetime.date)):
return False
asdt = self._as_datetime
if asdt(self.start) >= asdt(date):
return True
return False
def _datetime_in_range(self, date):
if not isinstance(date, (datetime.datetime, datetime.date)):
return False
asdt = self._as_datetime
if asdt(self.start) <= asdt(date) <= asdt(self.end):
return True
return False
@staticmethod
def _as_datetime(date):
if isinstance(date, datetime.date):
return datetime.datetime(*date.timetuple()[:3])
return date
@staticmethod
def _daterange_from_groupdict(dct, prefix=''):
start_keys = ['year', 'month', 'day', 'hour' , 'min']
end_keys = ['year', 'month', 'day', 'end_hour', 'end_min']
start_range = list(map(int, filter(None, (dct[prefix + k] for k in start_keys))))
end_range = list(map(int, filter(None, (dct[prefix + k] for k in end_keys))))
if len(end_range) < len(end_keys):
end_range = None
return (start_range, end_range)
def add_days(self, inc):
    """Shift the start (and end, when present) by *inc* days, in place."""
    delta = datetime.timedelta(days=inc)
    self._start += delta
    if self._end:
        self._end += delta
    # TODO: Handle recurrence rules
@staticmethod
def date_add_months(sourcedate,months):
month = sourcedate.month - 1 + months
year = sourcedate.year + month // 12
month = month % 12 + 1
day = min(sourcedate.day,calendar.monthrange(year,month)[1])
return datetime.date(year,month,day)
def add_months(self, inc):
    """Shift the start (and end, when present) by *inc* calendar months, in place.

    The shift is applied one month at a time by adding/subtracting the
    day-count of the month being crossed, so the day-of-month is kept
    as-is (no clamping is performed here).
    """
    forward = inc >= 0
    for _ in range(abs(inc)):
        if forward:
            self._start += datetime.timedelta(
                days=calendar.monthrange(self._start.year, self._start.month)[1])
            if self._end:
                self._end += datetime.timedelta(
                    days=calendar.monthrange(self._end.year, self._end.month)[1])
        else:
            # Step back by the length of the *previous* month, wrapping
            # January -> December across the year boundary.
            smonth = self._start.month - 1
            syear = self._start.year
            if smonth <= 0:
                smonth = 12
                syear -= 1
            self._start -= datetime.timedelta(
                days=calendar.monthrange(syear, smonth)[1])
            if self._end:
                # BUG FIX: the original checked `smonth` instead of the
                # end date's month and assigned a misspelled, unused
                # variable (`send`), so subtracting months from an end
                # date in January called calendar.monthrange(year, 0)
                # and raised IllegalMonthError.
                emonth = self._end.month - 1
                eyear = self._end.year
                if emonth <= 0:
                    emonth = 12
                    eyear -= 1
                self._end -= datetime.timedelta(
                    days=calendar.monthrange(eyear, emonth)[1])
    # TODO: Handle recurrence rules
@classmethod
def _datetuple_from_groupdict(cls, dct, prefix=''):
    """Return only the start tuple parsed from a regex group dict."""
    start, _end = cls._daterange_from_groupdict(dct, prefix=prefix)
    return start
@classmethod
def list_from_str(cls, string):
    """
    Parse string and return a list of :class:`OrgDate` objects

    Scans *string* recursively: each timestamp found becomes one
    element; two timestamps joined by ``--`` (with matching bracket
    style) become a single ranged element.

    >>> OrgDate.list_from_str("... <2012-02-10 Fri> and <2012-02-12 Sun>")
    [OrgDate((2012, 2, 10)), OrgDate((2012, 2, 12))]
    >>> OrgDate.list_from_str("<2012-02-10 Fri>--<2012-02-12 Sun>")
    [OrgDate((2012, 2, 10), (2012, 2, 12))]
    >>> OrgDate.list_from_str("<2012-02-10 Fri>--[2012-02-12 Sun]")
    [OrgDate((2012, 2, 10)), OrgDate((2012, 2, 12), None, False)]
    >>> OrgDate.list_from_str("this is not timestamp")
    []
    >>> OrgDate.list_from_str("<2012-02-11 Sat 10:11--11:20>")
    [OrgDate((2012, 2, 11, 10, 11, 0), (2012, 2, 11, 11, 20, 0))]
    """
    match = TIMESTAMP_RE.search(string)
    if match:
        rest = string[match.end():]
        mdict = match.groupdict()
        # Bracket style decides activeness and which rangedash ("--<"
        # vs "--[") would continue this timestamp into a range.
        if mdict['active_year']:
            prefix = 'active_'
            active = True
            rangedash = '--<'
        else:
            prefix = 'inactive_'
            active = False
            rangedash = '--['
        has_rangedash = rest.startswith(rangedash)
        match2 = TIMESTAMP_RE.search(rest) if has_rangedash else None
        if has_rangedash and match2:
            rest = rest[match2.end():]
            # no need for check activeness here because of the rangedash
            mdict2 = match2.groupdict()
            odate = cls(
                cls._datetuple_from_groupdict(mdict, prefix),
                cls._datetuple_from_groupdict(mdict2, prefix),
                active=active)
        else:
            # Single timestamp; may still carry an in-brace end time
            # (e.g. "10:11--11:20"), handled by _daterange_from_groupdict.
            odate = cls(
                *cls._daterange_from_groupdict(mdict, prefix),
                active=active)
        get_repeat_info(odate, mdict)
        # FIXME: treat "repeater" and "warn"
        # Recurse on the remainder; propagate repeat info from the next
        # parsed date back onto this one.
        ndate = cls.list_from_str(rest)
        if len(ndate) > 0:
            copy_repeat_info(ndate[0], odate)
        return [odate] + ndate
    else:
        return []
@classmethod
def from_str(cls, string):
    """
    Parse string and return an :class:`OrgDate` objects.

    Unlike :meth:`list_from_str`, this matches a single brace-less
    timestamp anchored at the beginning of *string*.

    >>> OrgDate.from_str('2012-02-10 Fri')
    OrgDate((2012, 2, 10))
    >>> OrgDate.from_str('2012-02-10 Fri 12:05')
    OrgDate((2012, 2, 10, 12, 5, 0))
    """
    match = cls._from_str_re.match(string)
    if match:
        mdict = match.groupdict()
        return cls(cls._datetuple_from_groupdict(mdict),
                   active=cls._active_default)
    else:
        # No timestamp at the start of the string: empty date object.
        return cls(None)

# Pattern used by from_str: a timestamp without <...> or [...] braces.
_from_str_re = TIMESTAMP_NOBRACE_RE
def compile_sdc_re(sdctype):
    """Compile a regex matching a ``SCHEDULED:``/``DEADLINE:``/``CLOSED:`` line.

    CLOSED timestamps are written with inactive ``[...]`` brackets; the
    other keywords use active ``<...>`` brackets.  Lines starting with
    ``#`` are excluded.
    """
    brtype = 'active' if sdctype != 'CLOSED' else 'inactive'
    timestamp = gene_timestamp_regex(brtype, prefix='', nocookie=True)
    pattern = r'^(?!\#).*{0}:\s+{1}'.format(sdctype, timestamp)
    return re.compile(pattern, re.VERBOSE)
class OrgDateSDCBase(OrgDate):
    """Base class for SCHEDULED/DEADLINE/CLOSED dates with repeat/warning cookies."""

    _re = None  # override this!

    # FIXME: use OrgDate.from_str
    @classmethod
    def from_str(cls, string):
        """Parse *string* with ``cls._re`` and return an instance.

        Repeater cookies (``+1w`` etc.) populate ``freq``, ``repeatnum``
        and ``repeat_rule``; warning cookies (``-2d`` etc.) populate
        ``warnnum``, ``wfreq`` and ``warn_rule``.  Returns ``cls(None)``
        when nothing matches.
        """
        match = cls._re.search(string)
        if not match:
            return cls(None)
        mdict = match.groupdict()
        start = cls._datetuple_from_groupdict(mdict)
        end = None
        end_hour = mdict['end_hour']
        end_min = mdict['end_min']
        if end_hour is not None and end_min is not None:
            # In-brace end time ("10:00--11:30"): reuse the start date
            # with the end hour/minute substituted in.
            end_dict = {}
            end_dict.update(mdict)
            end_dict.update({'hour': end_hour, 'min': end_min})
            end = cls._datetuple_from_groupdict(end_dict)
        rv = cls(start, end, active=cls._active_default)

        repeatpre = mdict['repeatpre']
        repeatnum = mdict['repeatnum']
        repeatdwmy = mdict['repeatdwmy']
        if repeatdwmy is not None and repeatpre is not None:
            freq_map = {'y': dr.YEARLY, 'm': dr.MONTHLY, 'w': dr.WEEKLY}
            rv.freq = freq_map.get(repeatdwmy, dr.DAILY)
            rv.repeatnum = max(int(repeatnum), 1)
            # Build an org mode repeat rule
            rv.repeat_rule = dr.rrule(rv.freq, interval=rv.repeatnum,
                                      dtstart=rv.start, cache=True)
            # The prefix determines what to do when the task is marked done:
            # +   bump to the next FIXED interval (even if that is in the past)
            # ++  bump to the next FIXED interval in the future, even if some
            #     were missed
            # .+  bump, but change the start date to today
            rv.repeatpre = repeatpre
            rv.repeatdwmy = repeatdwmy

        warnpre = mdict['warnpre']
        warnnum = mdict['warnnum']
        warndwmy = mdict['warndwmy']
        if warndwmy is not None and warnpre is not None:
            rv.warnnum = max(int(warnnum), 1)
            # BUG FIX: datetime.timedelta() has no 'years'/'months'
            # keyword arguments, so the original raised TypeError for
            # yearly/monthly warning cookies.  Approximate those spans
            # in days instead (365 days/year, 30 days/month).
            if warndwmy == 'y':
                rv.wfreq = dr.YEARLY
                rv.warn_rule = datetime.timedelta(days=365 * rv.warnnum)
            elif warndwmy == 'm':
                rv.wfreq = dr.MONTHLY
                rv.warn_rule = datetime.timedelta(days=30 * rv.warnnum)
            elif warndwmy == 'w':
                rv.wfreq = dr.WEEKLY
                rv.warn_rule = datetime.timedelta(weeks=rv.warnnum)
            else:
                rv.wfreq = dr.DAILY
                rv.warn_rule = datetime.timedelta(days=rv.warnnum)
            rv.warnpre = warnpre
            rv.warndwmy = warndwmy
        return rv
class OrgDateScheduled(OrgDateSDCBase):
    """Date object to represent SCHEDULED attribute."""
    # Matches lines like "SCHEDULED: <2012-02-10 Fri>".
    _re = compile_sdc_re('SCHEDULED')
    # SCHEDULED timestamps use active <...> brackets.
    _active_default = True
class OrgDateDeadline(OrgDateSDCBase):
    """Date object to represent DEADLINE attribute."""
    # Matches lines like "DEADLINE: <2012-02-10 Fri>".
    _re = compile_sdc_re('DEADLINE')
    # DEADLINE timestamps use active <...> brackets.
    _active_default = True
class OrgDateClosed(OrgDateSDCBase):
    """Date object to represent CLOSED attribute."""
    # Matches lines like "CLOSED: [2012-02-10 Fri]".
    _re = compile_sdc_re('CLOSED')
    # CLOSED timestamps use inactive [...] brackets.
    _active_default = False
def compile_nsdc_re():
    """Compile a regex for a brace-less timestamp at the start of a line."""
    timestamp = gene_timestamp_regex('nobrace', prefix='', nocookie=True)
    return re.compile(r'^\s*{0}'.format(timestamp), re.VERBOSE)
class OrgDateFreeFloating(OrgDateSDCBase):
    """Date object for a bare timestamp with no SCHEDULED/DEADLINE/CLOSED keyword."""
    # Brace-less timestamps carry no bracket style, so treat as inactive.
    _active_default = False
    _re = compile_nsdc_re()
def parse_sdc(string):
    """Parse one line and return a (scheduled, deadline, closed) triple.

    Each element is an empty date object (constructed with ``None``)
    when the corresponding keyword is absent from *string*.
    """
    return (OrgDateScheduled.from_str(string),
            OrgDateDeadline.from_str(string),
            OrgDateClosed.from_str(string))
class OrgDateClock(OrgDate):
    """
    Date object to represent CLOCK attributes.

    >>> OrgDateClock.from_str(
    ...     'CLOCK: [2010-08-08 Sun 17:00]--[2010-08-08 Sun 17:30] => 0:30')
    OrgDateClock((2010, 8, 8, 17, 0, 0), (2010, 8, 8, 17, 30, 0))
    """

    # CLOCK lines always use inactive [...] brackets.
    _active_default = False

    def __init__(self, start, end, duration=None, active=None):
        """Create an OrgDateClock; *duration* is the recorded length in minutes."""
        super(OrgDateClock, self).__init__(start, end, active=active)
        self._duration = duration

    @property
    def duration(self):
        """
        Duration of the CLOCK entry, computed as ``end - start``.

        >>> duration = OrgDateClock.from_str(
        ...     'CLOCK: [2010-08-08 Sun 17:00]--[2010-08-08 Sun 17:30] => 0:30'
        ... ).duration
        >>> duration.seconds
        1800
        >>> total_minutes(duration)
        30.0
        """
        return self.end - self.start

    def is_duration_consistent(self):
        """
        Return True when the recorded "=> H:MM" value matches end - start.

        >>> OrgDateClock.from_str(
        ...     'CLOCK: [2010-08-08 Sun 17:00]--[2010-08-08 Sun 17:30] => 0:30'
        ... ).is_duration_consistent()
        True
        >>> OrgDateClock.from_str(
        ...     'CLOCK: [2010-08-08 Sun 17:00]--[2010-08-08 Sun 17:30] => 0:15'
        ... ).is_duration_consistent()
        False
        """
        if self._duration is None:
            return True
        return self._duration == total_minutes(self.duration)

    @classmethod
    def from_str(cls, line):
        """
        Parse a CLOCK line into an instance carrying start, stop and the
        recorded duration in minutes.  Returns ``cls(None, None)`` when
        *line* contains no CLOCK entry.
        """
        found = cls._re.search(line)
        if not found:
            return cls(None, None)
        nums = [int(group) for group in found.groups()]
        start_dt = datetime.datetime(*nums[:5])
        stop_dt = datetime.datetime(*nums[5:10])
        minutes = nums[10] * 60 + nums[11]
        return cls(start_dt, stop_dt, minutes)

    _re = re.compile(
        r'^(?!#).*CLOCK:\s+'
        r'\[(\d+)\-(\d+)\-(\d+)[^\]\d]*(\d+)\:(\d+)\]--'
        r'\[(\d+)\-(\d+)\-(\d+)[^\]\d]*(\d+)\:(\d+)\]\s+=>\s+(\d+)\:(\d+)'
    )
class OrgDateRepeatedTask(OrgDate):
    """
    Date object to represent repeated tasks.

    Records the timestamp at which the task repeated, together with the
    TODO state before and after it was marked done.
    """

    _active_default = False

    def __init__(self, start, before, after, active=None):
        super(OrgDateRepeatedTask, self).__init__(start, active=active)
        self._before = before
        self._after = after

    def __repr__(self):
        args = [self._date_to_tuple(self.start), self.before, self.after]
        # Only show activeness when it differs from the class default.
        if self._active is not self._active_default:
            args.append(self._active)
        joined = ', '.join(repr(arg) for arg in args)
        return '{0}({1})'.format(type(self).__name__, joined)

    def __eq__(self, other):
        return super(OrgDateRepeatedTask, self).__eq__(other) and \
            isinstance(other, self.__class__) and \
            self._before == other._before and \
            self._after == other._after

    @property
    def before(self):
        """
        The state of task before marked as done.

        >>> od = OrgDateRepeatedTask((2005, 9, 1, 16, 10, 0), 'TODO', 'DONE')
        >>> od.before
        'TODO'
        """
        return self._before

    @property
    def after(self):
        """
        The state of task after marked as done.

        >>> od = OrgDateRepeatedTask((2005, 9, 1, 16, 10, 0), 'TODO', 'DONE')
        >>> od.after
        'DONE'
        """
        return self._after
|
#!python
import sentinel_api as api
# use username and password for ESA DATA Hub authentication
# NOTE: replace the placeholders below with real credentials before running.
username = '****YOUR_ESA_DATA_HUB_USERNAME****'
password = '****YOUR_ESA_DATA_HUB_PASSWORD****'
# please also specify the Hub URL:
# All Sentinel-1 and -2 scenes beginning from 15th Nov. 2015: https://scihub.esa.int/apihub/
# All historic Sentinel-1 scenes: https://scihub.esa.int/dhus/
s2 = api.SentinelDownloader(username, password, api_url='https://scihub.copernicus.eu/apihub/')
# set the download directory:
# - the scene list is filtered against files already present there
# - downloaded data is stored there
s2.set_download_dir('./')
# load area-of-interest geometries from shapefile
s2.load_sites('wetlands_v8.shp')
# search for scenes with some restrictions (minimum overlap: 1%)
s2.search('S2A*', min_overlap=0.01)
# you can either write results to a bash file for wget or download files directly in this script
# s2.write_results('wget', 'sentinel_api_s2_download.sh')
s2.download_all()
#!/usr/bin/env python
"""
Created by howie.hu at 2018/11/21.
"""
from ruia import Request, Spider, Middleware
middleware = Middleware()
@middleware.request
async def print_on_request(request):
    """Request middleware: inject a custom User-Agent header."""
    request.headers = {'User-Agent': 'ruia ua'}
@middleware.response
async def print_on_response(request, response):
    """Response middleware: print the headers the request was sent with."""
    print(request.headers)
async def retry_func(request):
    """Retry hook: relax the per-request timeout to 10 seconds."""
    request.request_config['TIMEOUT'] = 10
class TestSpider(Spider):
    """Demo spider exercising middleware, retries and per-request config."""

    start_urls = ['http://www.httpbin.org/get']
    # TIMEOUT is deliberately tiny so RETRY_FUNC gets exercised.
    request_config = {
        'RETRIES': 3,
        'DELAY': 0,
        'TIMEOUT': 0.1,
        'RETRY_FUNC': retry_func
    }

    async def parse(self, res):
        """Fan out follow-up requests for each page, tagging the index."""
        pages = ['http://www.httpbin.org/get', 'http://www.httpbin.org/get']
        for index, page in enumerate(pages):
            yield Request(
                page,
                callback=self.parse_item,
                metadata={'index': index},
                request_config=self.request_config,
            )

    async def parse_item(self, res):
        """Return the raw HTML of a followed page."""
        return res.html
if __name__ == '__main__':
    # Run the spider with the request/response middleware attached.
    TestSpider.start(middleware=middleware)
|
##
## The class SensorReader will read the raw ultrasonic sensor measurements
## and converts them into centimeter. After this, the readings will be filtered by a
## median filter of the size which is specified as a parameter. The function
## getSensorReadings() can be called every timestep and will return the
## three filtered ultrasonic readings of the left, right and front sensor.
##
## sr=SensorReader(5)
## while True:
## ul,ur,uf =sr.getSensorReadings()
##
import numpy as np
import RPi.GPIO as GPIO
import time
# Use BCM GPIO references
# instead of physical pin numbers
# (module-level side effect: configures pin numbering for the whole process)
GPIO.setmode(GPIO.BCM)
class SensorReader:
    # NOTE: this is Python 2 code (print statement, xrange, integer
    # division below); do not run under Python 3 without porting.

    # creates a SensorReader object that will get the raw sensor readings and
    # filter them with a median filter of specified size
    def __init__(self,size):
        self.size=size
        # Rolling windows of recent readings, newest sample at index 0.
        self.readingsLeft=np.zeros(size)
        self.readingsRight=np.zeros(size)
        # The front window is two samples larger than the side windows.
        self.readingsFront=np.zeros(size+2)
        # Define GPIO to use on Pi
        self.triggerLeft=2 #pin3
        self.echoLeft=3 #pin5
        self.triggerRight=17 #pin11
        self.echoRight=18 #pin12
        self.triggerFront=22 #pin15
        self.echoFront=23 #pin16
        # Set pins as output and input
        GPIO.setup(self.triggerLeft,GPIO.OUT)
        GPIO.setup(self.echoLeft,GPIO.IN)
        GPIO.setup(self.triggerRight,GPIO.OUT)
        GPIO.setup(self.echoRight,GPIO.IN)
        GPIO.setup(self.triggerFront,GPIO.OUT)
        GPIO.setup(self.echoFront,GPIO.IN)
        # Set trigger to False (Low)
        GPIO.output(self.triggerLeft, False)
        GPIO.output(self.triggerRight, False)
        GPIO.output(self.triggerFront, False)
        # Allow module to settle
        time.sleep(0.5)
        print "SensorReader created!"

    # returns the filtered sensor readings ul, ur and uf
    def getSensorReadings(self):
        values=self.getCurrentValues()
        ul,ur,uf=self.filterReading(values)
        return ul,ur,uf

    # filters the raw sensor readings with a median filter of size self.size
    def filterReading(self,values):
        # Shift each window one slot towards the end to make room for the
        # newest sample at index 0 (the oldest sample falls off).
        for i in reversed(xrange(self.size-1)):
            self.readingsLeft[i+1]=self.readingsLeft[i]
            self.readingsRight[i+1]=self.readingsRight[i]
        for i in reversed(xrange(self.size+2-1)):
            self.readingsFront[i+1]=self.readingsFront[i]
        self.readingsLeft[0]=values[0]
        self.readingsRight[0]=values[1]
        self.readingsFront[0]=values[2]
        # Median = middle element of the sorted window (Python 2 integer
        # division; presumably `size` is odd — confirm with callers).
        filterLeft=np.sort(self.readingsLeft)
        filterRight=np.sort(self.readingsRight)
        filterFront=np.sort(self.readingsFront)
        return filterLeft[self.size/2],filterRight[self.size/2],filterFront[(self.size+2)/2]

    # reads the GPIO pins and returns an array with unfiltered sensor readings
    def getCurrentValues(self):
        # read the raw distance values in cm
        valueLeft=self.readUltrasonic(self.triggerLeft,self.echoLeft)
        valueRight=self.readUltrasonic(self.triggerRight,self.echoRight)
        # brief pause before pinging the front sensor
        time.sleep(0.01)
        valueFront=self.readUltrasonic(self.triggerFront,self.echoFront)
        # save the sensor readings in an array and return it
        values=np.array([valueLeft,valueRight,valueFront])
        return values

    # triggers the trigger pin of ultrasonic module, then it waits
    # for the echo and calculates the distance in cm
    def readUltrasonic(self,trigger,echo):
        # Send 10us pulse to trigger
        GPIO.output(trigger, True)
        time.sleep(0.00001)
        GPIO.output(trigger, False)
        start = time.time()
        start1=time.time()
        # wait for echo; 0.1 s timeout so a missing echo cannot hang us
        while GPIO.input(echo)==0 and start-start1<0.1:
            start = time.time()
        stop = time.time()
        # measure the echo pulse width; 0.00583 s * 17150 caps the
        # reported distance at roughly 100 cm
        while GPIO.input(echo)==1 and stop-start<0.00583:
            stop = time.time()
        # Calculate pulse length
        #elapsed = stop-start
        # Distance pulse travelled in that time is time
        # multiplied by the speed of sound (cm/s), devided
        # by two since the sound did twice the distance
        #distance = ((stop-start) * 34300)/2
        distance = (stop-start) * 17150
        return round(distance,2)

    def kill(self):
        # Reset GPIO settings
        GPIO.cleanup()
##sr=SensorReader(5)
##while True:
## ul,ur,uf =sr.getSensorReadings()
## print ("Distance: "+str(ul)+"cm")
|
import random
import time
from crawlers.Spider import Spider as SpiderBase
from crawlers.Types import ArticleReturnType
from models.Article import Article
from models.Media import Media
from playwright.sync_api import BrowserContext, Page
from utils import Logger
from utils.crawl import (
cleanhtml,
get_elements_from_selector,
get_html_from_one_of_selectors,
get_html_from_selector,
random_activity,
)
from utils.hash import sha256_bytes
LOGGER = Logger.Logger()
class Spider(SpiderBase):
    """Reuters article crawler: loads a page, dismisses the consent modal,
    simulates some activity, then extracts article text and images."""

    def __init__(
        self,
        context: BrowserContext,
        page: Page,
        options: object,
    ) -> None:
        super().__init__(context, page, options)

    def run(self, url: str) -> ArticleReturnType:
        """Crawl *url* and return ``{"article": Article, "medias": [Media]}``."""
        self.page.goto(url)
        # Skip GPDR modal
        self.page.wait_for_selector("button.ot-pc-refuse-all-handler")
        self.page.query_selector("button.ot-pc-refuse-all-handler").click()
        # NOTE(review): random_activity presumably simulates user
        # interaction — confirm against utils.crawl.
        random_activity(self.page, intensifier=2.5)
        time.sleep(random.randint(0, 2))
        random_activity(self.page, intensifier=2.5)
        # Concatenate every paragraph of the article body.
        article_content_html = ""
        article_contents = self.page.query_selector_all(
            "div[class^='ArticleBody__content'] p"
        )
        for article_content in article_contents:
            article_content_html += "\n\n" + article_content.inner_html()
        medias = []
        article_image_bytes = None
        try:
            article_images = get_elements_from_selector(
                self.page, "div[class^='ArticleBody__container'] img"
            )
            if article_images:
                for article_image in article_images:
                    # Screenshot the rendered <img> element rather than
                    # re-downloading the source file.
                    article_image_bytes = article_image.screenshot()
                    if article_image_bytes:
                        medias.append(
                            Media(
                                sha256=sha256_bytes(article_image_bytes),
                                blob=article_image_bytes,
                            )
                        )
        except AttributeError:
            LOGGER.debug("No image found for this article")
        article_title_html = get_html_from_selector(
            self.page, "div[class^='Article__container'] h1[class^='Text__text']"
        )
        # No separate headline element is extracted for this source.
        article_headline_html = ""
        article_date_html = get_html_from_selector(
            self.page, "div[class^='Article__container'] time[class^='Text__text']"
        )
        article_author_html = get_html_from_one_of_selectors(
            self.page,
            [
                "div[class*='ArticleHeader__author']",
            ],
        )
        article = Article(
            source="Reuters",
            language="eng",
            url=url,
            title=cleanhtml(article_title_html),
            headline=cleanhtml(article_headline_html),
            article=cleanhtml(article_content_html),
            author=cleanhtml(article_author_html),
            published_at_txt=cleanhtml(article_date_html),
        )
        return {"article": article, "medias": medias}
|
"""
A pytest module to test Reed-Solomon decoding.
"""
import pytest
import numpy as np
import galois
from .helper import random_errors
CODES = [
(15, 13), # GF(2^4) with t=1
(15, 11), # GF(2^4) with t=2
(15, 9), # GF(2^4) with t=3
(15, 7), # GF(2^4) with t=4
(15, 5), # GF(2^4) with t=5
(15, 3), # GF(2^4) with t=6
(15, 1), # GF(2^4) with t=7
(16, 14), # GF(17) with t=1
(16, 12), # GF(17) with t=2
(16, 10), # GF(17) with t=3
(26, 24), # GF(3^3) with t=1
(26, 22), # GF(3^3) with t=2
(26, 20), # GF(3^3) with t=3
]
def test_exceptions():
    """decode() must reject list input (TypeError) and wrong-length codewords (ValueError)."""
    # Systematic code
    n, k = 15, 11
    rs = galois.ReedSolomon(n, k)
    GF = rs.field
    with pytest.raises(TypeError):
        rs.decode(GF.Random(n).tolist())
    with pytest.raises(ValueError):
        rs.decode(GF.Random(n + 1))

    # Non-systematic code
    rs = galois.ReedSolomon(n, k, systematic=False)
    GF = rs.field
    with pytest.raises(TypeError):
        rs.decode(GF.Random(n).tolist())
    with pytest.raises(ValueError):
        rs.decode(GF.Random(n - 1))
class TestSystematic:
    """Decoding tests for systematic RS codes on full-length codewords."""

    @pytest.mark.parametrize("size", CODES)
    def test_all_correctable(self, size):
        """With at most t errors per codeword, every message is recovered."""
        n, k = size
        batch = 100
        rs = galois.ReedSolomon(n, k)
        GF = rs.field
        messages = GF.Random((batch, k))
        codewords = rs.encode(messages)
        errors, n_errors = random_errors(GF, batch, n, rs.t)
        received = codewords + errors

        decoded = rs.decode(received)
        assert type(decoded) is GF
        assert np.array_equal(decoded, messages)

        decoded, n_corrected = rs.decode(received, errors=True)
        assert type(decoded) is GF
        assert np.array_equal(decoded, messages)
        assert np.array_equal(n_corrected, n_errors)

        # Plain ndarray input must round-trip as plain ndarray output.
        decoded = rs.decode(received.view(np.ndarray))
        assert type(decoded) is np.ndarray
        assert np.array_equal(decoded, messages)

        decoded, n_corrected = rs.decode(received.view(np.ndarray), errors=True)
        assert type(decoded) is np.ndarray
        assert np.array_equal(decoded, messages)
        assert np.array_equal(n_corrected, n_errors)

    @pytest.mark.parametrize("size", CODES)
    def test_some_uncorrectable(self, size):
        """With up to t+1 errors, rows within the t-error budget still decode."""
        n, k = size
        batch = 100
        rs = galois.ReedSolomon(n, k)
        GF = rs.field
        messages = GF.Random((batch, k))
        codewords = rs.encode(messages)
        errors, n_errors = random_errors(GF, batch, n, rs.t + 1)
        received = codewords + errors
        ok_rows = np.where(n_errors <= rs.t)[0]

        decoded = rs.decode(received)
        assert type(decoded) is GF
        assert np.array_equal(decoded[ok_rows, :], messages[ok_rows, :])

        decoded, n_corrected = rs.decode(received, errors=True)
        assert type(decoded) is GF
        assert np.array_equal(decoded[ok_rows, :], messages[ok_rows, :])
        assert np.array_equal(n_corrected[ok_rows], n_errors[ok_rows])

        decoded = rs.decode(received.view(np.ndarray))
        assert type(decoded) is np.ndarray
        assert np.array_equal(decoded[ok_rows, :], messages[ok_rows, :])

        decoded, n_corrected = rs.decode(received.view(np.ndarray), errors=True)
        assert type(decoded) is np.ndarray
        assert np.array_equal(decoded[ok_rows, :], messages[ok_rows, :])
        assert np.array_equal(n_corrected[ok_rows], n_errors[ok_rows])
class TestSystematicShortened:
    """Decoding tests for systematic RS codes shortened to half the message length."""

    @pytest.mark.parametrize("size", CODES)
    def test_all_correctable(self, size):
        """With at most t errors per shortened codeword, every message is recovered."""
        n, k = size[0], size[1]
        if k == 1:
            # BUG FIX: a bare `return` silently reported a pass; skip so
            # the report shows the case was not exercised.
            pytest.skip("k=1 codes cannot be shortened in half")
        ks = k // 2  # Shorten the code in half
        ns = n - (k - ks)
        N = 100
        rs = galois.ReedSolomon(n, k)
        GF = rs.field
        M = GF.Random((N, ks))
        C = rs.encode(M)
        E, N_errors = random_errors(GF, N, ns, rs.t)
        R = C + E

        DEC_M = rs.decode(R)
        assert type(DEC_M) is GF
        assert np.array_equal(DEC_M, M)

        DEC_M, N_corr = rs.decode(R, errors=True)
        assert type(DEC_M) is GF
        assert np.array_equal(DEC_M, M)
        assert np.array_equal(N_corr, N_errors)

        # Plain ndarray input must round-trip as plain ndarray output.
        DEC_M = rs.decode(R.view(np.ndarray))
        assert type(DEC_M) is np.ndarray
        assert np.array_equal(DEC_M, M)

        DEC_M, N_corr = rs.decode(R.view(np.ndarray), errors=True)
        assert type(DEC_M) is np.ndarray
        assert np.array_equal(DEC_M, M)
        assert np.array_equal(N_corr, N_errors)

    @pytest.mark.parametrize("size", CODES)
    def test_some_uncorrectable(self, size):
        """With up to t+1 errors, rows within the t-error budget still decode."""
        n, k = size[0], size[1]
        if k == 1:
            # BUG FIX: see test_all_correctable — skip instead of silently passing.
            pytest.skip("k=1 codes cannot be shortened in half")
        ks = k // 2  # Shorten the code in half
        ns = n - (k - ks)
        N = 100
        rs = galois.ReedSolomon(n, k)
        GF = rs.field
        M = GF.Random((N, ks))
        C = rs.encode(M)
        E, N_errors = random_errors(GF, N, ns, rs.t + 1)
        R = C + E
        corr_idxs = np.where(N_errors <= rs.t)[0]

        DEC_M = rs.decode(R)
        assert type(DEC_M) is GF
        assert np.array_equal(DEC_M[corr_idxs,:], M[corr_idxs,:])

        DEC_M, N_corr = rs.decode(R, errors=True)
        assert type(DEC_M) is GF
        assert np.array_equal(DEC_M[corr_idxs,:], M[corr_idxs,:])
        assert np.array_equal(N_corr[corr_idxs], N_errors[corr_idxs])

        DEC_M = rs.decode(R.view(np.ndarray))
        assert type(DEC_M) is np.ndarray
        assert np.array_equal(DEC_M[corr_idxs,:], M[corr_idxs,:])

        DEC_M, N_corr = rs.decode(R.view(np.ndarray), errors=True)
        assert type(DEC_M) is np.ndarray
        assert np.array_equal(DEC_M[corr_idxs,:], M[corr_idxs,:])
        assert np.array_equal(N_corr[corr_idxs], N_errors[corr_idxs])
class TestNonSystematic:
    """Decoding tests for non-systematic RS codes on full-length codewords."""

    @pytest.mark.parametrize("size", CODES)
    def test_all_correctable(self, size):
        """With at most t errors per codeword, every message is recovered."""
        n, k = size
        batch = 100
        rs = galois.ReedSolomon(n, k, systematic=False)
        GF = rs.field
        messages = GF.Random((batch, k))
        codewords = rs.encode(messages)
        errors, n_errors = random_errors(GF, batch, n, rs.t)
        received = codewords + errors

        decoded = rs.decode(received)
        assert type(decoded) is GF
        assert np.array_equal(decoded, messages)

        decoded, n_corrected = rs.decode(received, errors=True)
        assert type(decoded) is GF
        assert np.array_equal(decoded, messages)
        assert np.array_equal(n_corrected, n_errors)

        # Plain ndarray input must round-trip as plain ndarray output.
        decoded = rs.decode(received.view(np.ndarray))
        assert type(decoded) is np.ndarray
        assert np.array_equal(decoded, messages)

        decoded, n_corrected = rs.decode(received.view(np.ndarray), errors=True)
        assert type(decoded) is np.ndarray
        assert np.array_equal(decoded, messages)
        assert np.array_equal(n_corrected, n_errors)

    @pytest.mark.parametrize("size", CODES)
    def test_some_uncorrectable(self, size):
        """With up to t+1 errors, rows within the t-error budget still decode."""
        n, k = size
        batch = 100
        rs = galois.ReedSolomon(n, k, systematic=False)
        GF = rs.field
        messages = GF.Random((batch, k))
        codewords = rs.encode(messages)
        errors, n_errors = random_errors(GF, batch, n, rs.t + 1)
        received = codewords + errors
        ok_rows = np.where(n_errors <= rs.t)[0]

        decoded = rs.decode(received)
        assert type(decoded) is GF
        assert np.array_equal(decoded[ok_rows, :], messages[ok_rows, :])

        decoded, n_corrected = rs.decode(received, errors=True)
        assert type(decoded) is GF
        assert np.array_equal(decoded[ok_rows, :], messages[ok_rows, :])
        assert np.array_equal(n_corrected[ok_rows], n_errors[ok_rows])

        decoded = rs.decode(received.view(np.ndarray))
        assert type(decoded) is np.ndarray
        assert np.array_equal(decoded[ok_rows, :], messages[ok_rows, :])

        decoded, n_corrected = rs.decode(received.view(np.ndarray), errors=True)
        assert type(decoded) is np.ndarray
        assert np.array_equal(decoded[ok_rows, :], messages[ok_rows, :])
        assert np.array_equal(n_corrected[ok_rows], n_errors[ok_rows])
|
"""Tests for runners."""
import os
import unittest
from ecosystem.runners import PythonTestsRunner, PythonStyleRunner, PythonCoverageRunner
class TestPythonRunner(unittest.TestCase):
    """Tests for Python runner."""

    def setUp(self) -> None:
        # Paths to the two fixture repositories the runners execute against.
        current_directory = os.path.dirname(os.path.abspath(__file__))
        self.simple_project_dir = (
            f"{current_directory}/" f"../resources/simple_python_repository"
        )
        self.configured_project_dir = (
            f"{current_directory}/" f"../resources/configured_python_repository"
        )

    def tearDown(self) -> None:
        # Remove files the runners generate inside the fixture repos so
        # every test starts from a clean checkout.
        files_to_delete = ["tox.ini", "terra_version.txt"]
        for directory in [self.simple_project_dir, self.configured_project_dir]:
            for file in files_to_delete:
                if os.path.exists(f"{directory}/{file}"):
                    os.remove(f"{directory}/{file}")

    def test_tests_runner_on_simple_repo(self):
        """Simple runner test.
        Function: PythonTestsRunner
                -> cloned_repo_directory
        Args:
            simple_repo
        """
        runner = PythonTestsRunner(
            "test", working_directory=self.simple_project_dir, ecosystem_deps=["qiskit"]
        )
        runner.cloned_repo_directory = self.simple_project_dir
        terra_version, result = runner.workload()
        # At least one result must be free of qiskit deprecation logs.
        self.assertFalse(all(r.has_qiskit_deprecation_logs for r in result))
        self.assertTrue(all(r.ok for r in result))
        self.assertTrue(terra_version)

    def test_tests_runner_on_configured_repo(self):
        """Configured repo runner test.
        Function: PythonTestsRunner
                -> cloned_repo_directory
        Args:
            configured_repo
        """
        runner = PythonTestsRunner(
            "test",
            working_directory=self.configured_project_dir,
            ecosystem_deps=["qiskit"],
        )
        runner.cloned_repo_directory = self.configured_project_dir
        terra_version, result = runner.workload()
        self.assertTrue(all(r.ok for r in result))
        self.assertTrue(terra_version)

    def test_styles_runner_on_configured_repo(self):
        """Configured repo runner test.
        Function: PythonStyleRunner
                -> cloned_repo_directory
        Args:
            configured_repo
        """
        runner = PythonStyleRunner(
            "test", working_directory=self.configured_project_dir
        )
        runner.cloned_repo_directory = self.configured_project_dir
        # Only the result list is inspected; the first tuple element
        # (version info) is irrelevant for the style run.
        _, result = runner.workload()
        self.assertTrue(len(result) > 0)

    def test_coverages_runner_on_configured_repo(self):
        """Configured repo runner test.
        Function: PythonCoverageRunner
                -> cloned_repo_directory
        Args:
            configured_repo
        """
        runner = PythonCoverageRunner(
            "test", working_directory=self.configured_project_dir
        )
        runner.cloned_repo_directory = self.configured_project_dir
        _, result = runner.workload()
        self.assertTrue(len(result) > 0)
|
"""The test project for the draw_rect interface.
Command examples:
$ python test_projects/draw_rect/main.py
"""
import sys
sys.path.append('./')
import os
from types import ModuleType
import apysc as ap
from apysc._file import file_util
# Reference to this module object, used to resolve its directory path.
this_module: ModuleType = sys.modules[__name__]
# All generated HTML for this test project goes under this directory.
_DEST_DIR_PATH: str = os.path.join(
    file_util.get_abs_module_dir_path(module=this_module),
    'test_output/'
)
def main() -> None:
    """
    Entry point of this test project.

    Draws a grid of rectangles exercising the draw_rect-related
    interfaces (fill, line style, attribute updates, immutability
    checks) and saves the overall HTML output to ``_DEST_DIR_PATH``.
    """
    stage: ap.Stage = ap.Stage(
        background_color='#333',
        stage_width=1000, stage_height=500, stage_elem_id='stage')
    ap.set_debug_mode()

    # Basic functional test case.
    sprite: ap.Sprite = ap.Sprite()
    sprite.graphics.begin_fill(color='#00aaff')
    sprite.graphics.draw_rect(x=50, y=50, width=50, height=50)
    stage.add_child(child=sprite)

    # Test for begin_fill interface.
    sprite.graphics.begin_fill(color='#00aaff', alpha=0.5)
    sprite.graphics.draw_rect(x=150, y=50, width=50, height=50)

    # Test for line_style interface.
    sprite.graphics.begin_fill(color='#00aaff')
    sprite.graphics.line_style(color='#fff', thickness=3, alpha=0.7)
    sprite.graphics.draw_rect(x=250, y=50, width=50, height=50)

    # Test for rectangle x position update.
    rectangle: ap.Rectangle = sprite.graphics.draw_rect(
        x=0, y=50, width=50, height=50)
    rectangle.x = ap.Int(350)

    # Test for rectangle y position update.
    rectangle = sprite.graphics.draw_rect(
        x=450, y=0, width=50, height=50)
    rectangle.y = ap.Int(50)

    # Test for rectangle width update.
    rectangle = sprite.graphics.draw_rect(
        x=550, y=50, width=50, height=50)
    rectangle.width = ap.Int(100)

    # Test for rectangle height update.
    rectangle = sprite.graphics.draw_rect(
        x=700, y=50, width=50, height=50)
    rectangle.height = ap.Int(100)

    # Test for rectangle fill color update.
    rectangle = sprite.graphics.draw_rect(
        x=800, y=50, width=50, height=50)
    rectangle.fill_color = ap.String('#f0a')

    # Test for rectangle fill alpha update.
    rectangle = sprite.graphics.draw_rect(
        x=900, y=50, width=50, height=50)
    rectangle.fill_alpha = ap.Number(0.5)

    # Test for rectangle line color update.
    rectangle = sprite.graphics.draw_rect(
        x=50, y=150, width=50, height=50)
    rectangle.line_color = ap.String('#f0a')

    # Test for rectangle line thickness update.
    rectangle = sprite.graphics.draw_rect(
        x=150, y=150, width=50, height=50)
    rectangle.line_thickness = ap.Int(1)

    # Test for rectangle line alpha update.
    rectangle = sprite.graphics.draw_rect(
        x=250, y=150, width=50, height=50)
    rectangle.line_alpha = ap.Number(1.0)

    # Drawing from another function must share the same graphics state.
    _another_func(stage=stage, sprite=sprite)
    sprite.graphics.draw_rect(
        x=450, y=150, width=50, height=50)

    # Test for rectangle fill alpha update with Number.
    number_1: ap.Number = ap.Number(0.725)
    rectangle = sprite.graphics.draw_rect(
        x=550, y=150, width=50, height=50)
    rectangle.fill_alpha = number_1

    # Test for each attribute values are immutable:
    # mutating a value read back from the rectangle must not change
    # the rectangle's own attribute.
    rectangle.fill_alpha = ap.Number(0.5)
    fill_alpha: ap.Number = rectangle.fill_alpha
    fill_alpha += 0.2
    ap.assert_not_equal(left=fill_alpha, right=rectangle.fill_alpha)

    rectangle.x = ap.Int(550)
    x: ap.Int = rectangle.x
    x += 100
    ap.assert_not_equal(left=x, right=rectangle.x)

    rectangle.y = ap.Int(150)
    y: ap.Int = rectangle.y
    y += 100
    ap.assert_not_equal(left=y, right=rectangle.y)

    rectangle.line_thickness = ap.Int(2)
    line_thickness: ap.Int = rectangle.line_thickness
    line_thickness += 1
    ap.assert_not_equal(
        left=line_thickness, right=rectangle.line_thickness)

    rectangle.line_alpha = ap.Number(0.5)
    line_alpha: ap.Number = rectangle.line_alpha
    line_alpha += 0.2
    ap.assert_not_equal(left=line_alpha, right=rectangle.line_alpha)

    width: ap.Int = rectangle.width
    width = ap.Int(150)
    ap.assert_not_equal(left=width, right=rectangle.width)

    height: ap.Int = rectangle.height
    height = ap.Int(200)
    ap.assert_not_equal(left=height, right=rectangle.height)

    # Test for a dotted line style.
    sprite.graphics.line_style(
        color='#fff', thickness=5, dot_setting=ap.LineDotSetting(dot_size=5))
    rectangle = sprite.graphics.draw_rect(
        x=650, y=150, width=50, height=50)

    ap.save_overall_html(dest_dir_path=_DEST_DIR_PATH)
def _another_func(stage: ap.Stage, sprite: ap.Sprite) -> None:
    """
    Another function to test expression and arguments behavior.

    Parameters
    ----------
    stage : Stage
        Stage instance.
    sprite : Sprite
        Sprite instance.
    """
    # Change the fill color from another call frame, then draw; main()
    # draws again afterwards to verify the state carries over.
    sprite.graphics.begin_fill(color='#f0a')
    sprite.graphics.draw_rect(x=350, y=150, width=50, height=50)
    stage.add_child(child=sprite)
if __name__ == '__main__':
    # Generate the test output HTML when run as a script.
    main()
|
"""
.. module: lemur.plugins.lemur_aws.s3
:platform: Unix
:synopsis: Contains helper functions for interactive with AWS S3 Apis.
:copyright: (c) 2018 by Netflix Inc., see AUTHORS for more
:license: Apache, see LICENSE for more details.
.. moduleauthor:: Kevin Glisson <kglisson@netflix.com>
"""
from flask import current_app
from .sts import sts_client
@sts_client("s3", service_type="resource")
def put(bucket_name, region, prefix, data, encrypt, **kwargs):
    """
    Use STS to write to an S3 bucket
    """
    bucket = kwargs["resource"].Bucket(bucket_name)
    current_app.logger.debug(
        "Persisting data to S3. Bucket: {0} Prefix: {1}".format(bucket_name, prefix)
    )
    # get data ready for writing
    if isinstance(data, str):
        data = data.encode("utf-8")
    put_kwargs = {
        "Key": prefix,
        "Body": data,
        "ACL": "bucket-owner-full-control",
    }
    if encrypt:
        # Request SSE-S3 server-side encryption for the object.
        put_kwargs["ServerSideEncryption"] = "AES256"
    bucket.put_object(**put_kwargs)
|
import numpy as np
from math import sqrt
def str2bit(s):
    """Convert '#'/'.'-art with '/' row separators into a flat '0'/'1' string."""
    table = str.maketrans({'.': '0', '#': '1'})
    return s.strip().replace('/', '').translate(table)
def str2val(s):
    """Interpret the pattern string as a binary number."""
    bits = s.strip().replace('/', '').replace('.', '0').replace('#', '1')
    return int(bits, 2)
def str2arr(s):
    """Parse the pattern string into a 2-D uint8 array of 0s and 1s."""
    rows = s.strip().split('/')
    grid = np.array([list(row) for row in rows])
    return (grid == '#').astype(np.uint8)
def arr2val(a):
    """Pack a 0/1 array row-major into a single integer."""
    bits = ''.join(str(b) for b in a.flatten())
    return int(bits, 2)
def permvals(a):
    """Return the distinct integer encodings of all 8 symmetries of *a*.

    Alternating transpose and vertical flip walks through every rotation
    and reflection of the square pattern (dihedral group of the square).
    """
    def encode(m):
        # Row-major 0/1 bits packed into one integer.
        return int(''.join(str(b) for b in m.flatten()), 2)

    seen = {encode(a)}
    for _ in range(3):
        a = a.T
        seen.add(encode(a))
        a = np.flipud(a)
        seen.add(encode(a))
    seen.add(encode(a.T))
    return list(seen)
# Enhancement rules keyed by input tile size (2x2 or 3x3), then by the
# integer encoding of any symmetry of the input pattern.
rule = { 2: {}, 3: {} }
with open('21.txt') as f:
    for line in f:
        a, b = line.split(' => ')
        # 2x2 patterns are 5 characters ("ab/cd"); 3x3 patterns are 11.
        i = 2 if len(a) == 5 else 3
        p = permvals(str2arr(a)) # search patterns
        r = str2val(b) # replacement
        for j in p:
            rule[i][j] = r
def partition(bitstr, area, size, chunk):
    """Split a flattened size*size bit string into chunk*chunk sub-squares.

    Returns the integer value of each sub-square, ordered left-to-right,
    top-to-bottom.
    """
    row_block = size * chunk  # flat offset covering one band of `chunk` rows
    values = []
    for band in range(0, area, row_block):          # top edge of each band
        for col in range(0, size, chunk):           # left edge of each square
            bits = ''.join(
                bitstr[band + col + r:band + col + r + chunk]
                for r in range(0, row_block, size)  # consecutive rows
            )
            values.append(int(bits, 2))
    return values
def val2bit(val, partarea):
    """Render *val* as a binary string zero-padded to *partarea* digits.

    str.zfill replaces the manual "'0' * (n - len(s)) + s" padding; for
    val == 0, bin() already yields '0' and zfill pads it identically.
    """
    return bin(val)[2:].zfill(partarea)
def evolve(bitstr):
    """Apply one enhancement step to the flattened square image *bitstr*.

    The image is cut into 2x2 (or 3x3) sub-squares, each sub-square is
    replaced via the global ``rule`` table by a 3x3 (or 4x4) block, and
    the enlarged blocks are stitched back into one flat bit string.
    """
    area = len(bitstr)
    size = int(sqrt(area))
    # Even side length -> 2x2 rules apply, otherwise 3x3.
    chunk = 2 if size % 2 == 0 else 3
    parts = partition(bitstr, area, size, chunk)
    transform = list(map(lambda x: rule[chunk][x], parts))
    # Geometry of the enlarged image: each sub-square grows by one pixel
    # per side, so recompute all sizes before reassembly.
    partperrow = size // chunk
    parts = partperrow * partperrow
    chunk += 1
    size = partperrow * chunk
    area = size * size
    partarea = chunk * chunk
    a = list(map(lambda x: val2bit(x, partarea), transform))
    s = ''
    # Interleave: for each band of blocks, emit one row slice from every
    # block across the band before moving to the next row slice.
    for i in range(0, parts, partperrow):
        for k in range(0, partarea, chunk):
            for j in range(partperrow):
                s += a[i + j][k:k + chunk]
    return s
# Standard Advent of Code 2017 day 21 starting pattern.
image = '.#./..#/###'
art = str2bit(image)
# Report the number of lit ('1') pixels after each iteration;
# iteration 5 answers part 1, iteration 18 answers part 2.
print(0, sum(map(int, list(art))))
for i in range(1, 19):
    art = evolve(art)
    print(i, sum(map(int, list(art))))
# part 1: 179
# part 2: 2766750
|
#!/usr/bin/env python
import rospy
import threading
import time
from robot_sim.msg import RobotState
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
from math import sin
from math import cos
from math import pi
class CartPoleGUI(object):
    """Live matplotlib visualisation of a cart-pole system.

    Subscribes to the ``robot_state`` topic and redraws the cart (a
    rectangle on the ground line) and the pole (a line) each time
    :meth:`render` is called from the main loop.
    """
    def __init__(self):
        # The callback only caches the latest state; all drawing happens
        # in render() on the main thread (matplotlib is not thread-safe).
        self.sub = rospy.Subscriber("robot_state", RobotState, self.update_state, queue_size = 100)
        self.fig = plt.figure(figsize=(10, 10))
        self.ax = self.fig.add_subplot(111)
        self.ax.axis('equal')
        plt.xlim(-1.5, 1.5)
        plt.ylim(-1, 2)
        self.fig.canvas.draw()
        plt.show(block=False)
        self.x = 0  # cart position
        self.q = 0  # pole angle in radians
    def update_state(self,msg):
        """Subscriber callback: store the newest cart position and pole angle.

        NOTE(review): assumes robot_state[0] is the cart x position and
        robot_state[1] the pole angle -- confirm against the publisher.
        """
        self.x = msg.robot_state[0]
        self.q = msg.robot_state[1]
    def render(self):
        """Redraw the scene at the most recently received state."""
        plt.figure(self.fig.number)
        self.ax.clear()
        self.ax.axhline(y=-0.05, color='k')  # ground line
        self.ax.add_patch(plt.Rectangle(xy=[-0.15 + self.x, -0.1], height=0.2, width=0.3, fill=True, facecolor='slategray'))
        l = 1.0  # pole length
        # Pole drawn from the cart centre; angle measured from vertical.
        self.ax.add_line(mlines.Line2D((self.x, self.x - l*sin(self.q)), (0, l*cos(self.q)), color='firebrick', linewidth=10))
        self.fig.canvas.draw()
def my_thread():
    # Background worker: pump ROS callbacks so update_state keeps firing
    # while the main thread owns the matplotlib rendering loop.
    rospy.spin()
if __name__ == '__main__':
    rospy.init_node('cartpole_gui', anonymous=True)
    gui = CartPoleGUI()
    # Use a distinct name for the thread object: the original rebound
    # `my_thread`, shadowing the function of the same name defined above.
    spin_thread = threading.Thread(target=my_thread)
    spin_thread.daemon = True  # don't block interpreter exit on shutdown
    spin_thread.start()
    # Render at ~200 Hz on the main thread until ROS shuts down.
    while not rospy.is_shutdown():
        gui.render()
        time.sleep(0.005)
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
from cvxpy.expressions.constants import Constant
def constant_canon(expr, real_args, imag_args, real2imag):
    """Split a constant expression into its real and imaginary Constants.

    Returns (real_part, None) for purely real constants,
    (None, imag_part) for purely imaginary ones, and a pair of Constants
    otherwise.
    """
    value = expr.value
    if expr.is_real():
        return Constant(value.real), None
    if expr.is_imag():
        return None, Constant(value.imag)
    return Constant(value.real), Constant(value.imag)
|
import re
import os
import sys
import time
import random
import subprocess
from PyQt5 import QtGui
from PyQt5 import QtCore, QtGui, QtWidgets
def open_file(filename):
    """Open *filename* with the platform's default application."""
    if sys.platform == "win32":
        os.startfile(filename)
        return
    # macOS ships `open`; assume freedesktop's `xdg-open` elsewhere.
    launcher = "open" if sys.platform == "darwin" else "xdg-open"
    subprocess.call([launcher, filename])
# Qt Main Class
class Ui_MainWindow(object):
    """Main window for the password-list generator.

    setupUi/retranslateUi follow the Qt Designer auto-generated layout
    convention; generate() is the hand-written action handler.
    """
    def setupUi(self, MainWindow):
        """Build the fixed 640x480 window with its form fields and tabs."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(640, 480)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        MainWindow.setMinimumSize(QtCore.QSize(640, 480))
        MainWindow.setMaximumSize(QtCore.QSize(640, 480))
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
        self.tabWidget.setGeometry(QtCore.QRect(10, 10, 621, 431))
        self.tabWidget.setObjectName("tabWidget")
        self.tab = QtWidgets.QWidget()
        self.tab.setObjectName("tab")
        self.pushButton = QtWidgets.QPushButton(self.tab)
        self.pushButton.setGeometry(QtCore.QRect(10, 340, 75, 23))
        self.pushButton.setObjectName("pushButton")
        self.lineEdit = QtWidgets.QLineEdit(self.tab)
        self.lineEdit.setGeometry(QtCore.QRect(90, 10, 201, 20))
        self.lineEdit.setObjectName("lineEdit")
        self.lineEdit_2 = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_2.setGeometry(QtCore.QRect(400, 10, 201, 20))
        self.lineEdit_2.setObjectName("lineEdit_2")
        self.lineEdit_3 = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_3.setGeometry(QtCore.QRect(90, 40, 201, 20))
        self.lineEdit_3.setObjectName("lineEdit_3")
        self.label = QtWidgets.QLabel(self.tab)
        self.label.setGeometry(QtCore.QRect(10, 10, 61, 21))
        self.label.setObjectName("label")
        self.label_2 = QtWidgets.QLabel(self.tab)
        self.label_2.setGeometry(QtCore.QRect(310, 10, 61, 21))
        self.label_2.setObjectName("label_2")
        self.label_3 = QtWidgets.QLabel(self.tab)
        self.label_3.setGeometry(QtCore.QRect(10, 40, 61, 21))
        self.label_3.setObjectName("label_3")
        self.label_4 = QtWidgets.QLabel(self.tab)
        self.label_4.setGeometry(QtCore.QRect(310, 70, 61, 21))
        self.label_4.setObjectName("label_4")
        self.lineEdit_4 = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_4.setGeometry(QtCore.QRect(400, 40, 201, 20))
        self.lineEdit_4.setObjectName("lineEdit_4")
        self.label_5 = QtWidgets.QLabel(self.tab)
        self.label_5.setGeometry(QtCore.QRect(10, 70, 71, 21))
        self.label_5.setObjectName("label_5")
        self.lineEdit_5 = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_5.setGeometry(QtCore.QRect(90, 70, 201, 20))
        self.lineEdit_5.setObjectName("lineEdit_5")
        self.label_6 = QtWidgets.QLabel(self.tab)
        self.label_6.setGeometry(QtCore.QRect(310, 40, 81, 21))
        self.label_6.setObjectName("label_6")
        self.lineEdit_6 = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_6.setGeometry(QtCore.QRect(400, 70, 201, 20))
        self.lineEdit_6.setObjectName("lineEdit_6")
        self.progressBar = QtWidgets.QProgressBar(self.tab)
        self.progressBar.setGeometry(QtCore.QRect(40, 371, 561, 20))
        self.progressBar.setProperty("value", 0)
        self.progressBar.setObjectName("progressBar")
        self.lineEdit_7 = QtWidgets.QLineEdit(self.tab)
        self.lineEdit_7.setEnabled(False)
        self.lineEdit_7.setGeometry(QtCore.QRect(90, 341, 511, 20))
        self.lineEdit_7.setObjectName("lineEdit_7")
        self.label_8 = QtWidgets.QLabel(self.tab)
        self.label_8.setGeometry(QtCore.QRect(310, 100, 291, 21))
        self.label_8.setObjectName("label_8")
        self.textEdit = QtWidgets.QTextEdit(self.tab)
        self.textEdit.setGeometry(QtCore.QRect(10, 130, 591, 201))
        self.textEdit.setObjectName("textEdit")
        self.dateEdit = QtWidgets.QDateEdit(self.tab)
        self.dateEdit.setGeometry(QtCore.QRect(90, 100, 201, 22))
        self.dateEdit.setObjectName("dateEdit")
        self.label_9 = QtWidgets.QLabel(self.tab)
        self.label_9.setGeometry(QtCore.QRect(10, 100, 61, 21))
        self.label_9.setObjectName("label_9")
        self.tabWidget.addTab(self.tab, "")
        self.tab_2 = QtWidgets.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.label_7 = QtWidgets.QLabel(self.tab_2)
        self.label_7.setGeometry(QtCore.QRect(16, 10, 591, 381))
        font = QtGui.QFont()
        font.setFamily("Jellee Roman")
        font.setBold(True)
        font.setWeight(75)
        self.label_7.setFont(font)
        self.label_7.setObjectName("label_7")
        self.tabWidget.addTab(self.tab_2, "")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 640, 21))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.tabWidget.setCurrentIndex(0)
        self.pushButton.clicked.connect(self.generate)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    # The Generator Function
    def generate(self):
        """Build a password wordlist from the entered personal details.

        Collects the form fields, derives upper-case and "leet" variants
        of every word, writes up to 100 random 1-3 word combinations to
        passwords.txt, and opens the file when done.
        """
        first_name = self.lineEdit.text()
        last_name = self.lineEdit_2.text()
        id_no = self.lineEdit_3.text()
        phone_no = self.lineEdit_4.text()
        country = self.lineEdit_5.text()
        city = self.lineEdit_6.text()
        simple_birth = self.dateEdit.text()
        # Raw strings so the regex metacharacter \w is not parsed as a
        # (deprecated) string escape sequence.
        birth = re.sub(r"[^\w]", " ", simple_birth).split()
        day = birth[0]
        month = birth[1]
        year = birth[2]
        others = self.textEdit.toPlainText()
        othersList = re.sub(r"[^\w]", " ", others).split()
        infoList = [first_name, last_name, id_no, phone_no, country, city, day, month, year]
        ninfoList = []
        for word in infoList:
            ninfoList.append(word.upper())
            if 'a' in word: ninfoList.append(word.replace("a", "@"))
            if 'b' in word: ninfoList.append(word.replace("b", "4"))
            if 'e' in word: ninfoList.append(word.replace("e", "3"))
            if 'o' in word: ninfoList.append(word.replace("o", "0"))
            if 'h' in word: ninfoList.append(word.replace("h", "8"))
            if 'i' in word: ninfoList.append(word.replace("i", "1"))
        data = othersList + infoList + ninfoList
        # Context manager guarantees the file is flushed and closed even
        # if sampling raises (the original left the handle open).
        with open("passwords.txt", "w") as f:
            for i in range(100):
                output = ''.join(random.sample(data, random.randint(1, 3)))
                if len(output) > 6:
                    f.write(output+'\n')
                self.progressBar.setValue(i+1)
        self.lineEdit_7.setText('Information: passwords saved to passwords.txt!')
        open_file("passwords.txt")
    def retranslateUi(self, MainWindow):
        """Apply all user-visible strings (Qt Designer convention)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Passenerator"))
        self.pushButton.setText(_translate("MainWindow", "Generate!"))
        self.label.setText(_translate("MainWindow", "First Name:"))
        self.label_2.setText(_translate("MainWindow", "Last Name:"))
        self.label_3.setText(_translate("MainWindow", "ID Number:"))
        self.label_4.setText(_translate("MainWindow", "Born City:"))
        self.label_5.setText(_translate("MainWindow", "Born Country:"))
        self.label_6.setText(_translate("MainWindow", "Phone Number:"))
        self.lineEdit_7.setText(_translate("MainWindow", "Information: "))
        self.label_8.setText(_translate("MainWindow", "Enter Any Other Words Like Guessed Passes Separate via ,"))
        self.label_9.setText(_translate("MainWindow", "Birthday:"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "Main"))
        self.label_7.setText(_translate("MainWindow", "<html><head/><body><p align=\"center\">This Software Make Passwords Based On Person\'s Information.</p><p align=\"center\"><span style=\" font-size:10pt; font-weight:600;\">By: Mr A</span></p><p align=\"center\"><a href=\"https://github.com/misteralipour\"><span style=\" text-decoration: underline; color:#0000ff;\">https://github.com/misteralipour</span></a></p></body></html>"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "About"))
if __name__ == "__main__":
    import sys
    # Standard Qt bootstrap: build the window, wire up the UI, show it,
    # and hand control to the event loop until the user quits.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
|
from shapely.geometry import Point, LineString, Polygon
def createPointGeom(x_cord, y_cord):
    """Create a shapely Point from x and y coordinates."""
    point = Point(x_cord, y_cord)
    return point
def createLineGeom(lst_points):
    """Build a LineString from the Point instances in *lst_points*.

    Items that are not shapely Points are silently skipped.
    """
    vertices = [item for item in lst_points if isinstance(item, Point)]
    return LineString(vertices)
def createPolyGeom(lst_points):
    """Build a Polygon from the Points / coordinate tuples in *lst_points*.

    Items that are neither Points nor tuples are silently skipped.
    """
    ring = [item for item in lst_points if isinstance(item, (Point, tuple))]
    return Polygon(ring)
def getCentroid(geometry):
    """Return the centroid of a Point/LineString/Polygon, or None otherwise."""
    if isinstance(geometry, (Point, LineString, Polygon)):
        return geometry.centroid
def getArea(geometry):
    """Return the area of a Polygon; None for any other input."""
    if isinstance(geometry, Polygon):
        return geometry.area
def getLength(geometry):
    """Length of a LineString/Polygon, or an error message string otherwise."""
    if isinstance(geometry, (Polygon, LineString)):
        return geometry.length
    return "Error: LineString or Polygon geometries required!"
import numpy as np
travel_data = 'travelTimes_2015_Helsinki.txt'
# Columns 5-8 hold origin x/y and destination x/y coordinates
# (columns 9-10 are loaded but unused below); skiprows drops the header.
helsinki_data = np.loadtxt(travel_data, delimiter=';', usecols = (5,6,7,8,9,10), skiprows = 1)
orig_points = []
dest_points = []
# Build one shapely Point per origin and per destination.
for elem in helsinki_data:
    from_x = elem[0]
    from_y = elem[1]
    to_x = elem[2]
    to_y = elem[3]
    orig_points.append(createPointGeom(from_x, from_y))
    dest_points.append(createPointGeom(to_x, to_y))
def linestrings(orig_pts, dest_pts):
    """Pair origin and destination Points into one LineString each."""
    return [createLineGeom([start, end])
            for start, end in zip(orig_pts, dest_pts)]
def euclidian_dist(orig_pts, dest_pts):
    """Aggregate distance value over all origin/destination lines.

    NOTE(review): this subtracts each line's y-coordinate pair from its
    x-coordinate pair (x_coord - y_coord) and collapses everything into a
    single scalar. A per-line Euclidean distance would instead difference
    the two x values and the two y values of each line. Confirm the
    intended semantics before relying on this number.
    """
    linestring = linestrings(orig_pts, dest_pts)
    coords = [elem.xy for elem in linestring]
    x_coord = np.array([i[0] for i in coords])  # (n, 2): both x values per line
    y_coord = np.array([j[1] for j in coords])  # (n, 2): both y values per line
    sum_sq = np.sum(np.square(x_coord - y_coord))
    dist = np.sqrt(sum_sq)
    return dist
# Print the single aggregated distance value for the whole dataset.
euclidian_distance = euclidian_dist(orig_points, dest_points)
print(euclidian_distance)
|
import pytest
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal_nulp
from sklearn.datasets import make_regression
try:
from mpi4py import MPI
except ImportError:
MPI = None
from pyuoi.datasets import make_classification, make_poisson_regression
from pyuoi.linear_model import (UoI_Lasso,
UoI_L1Logistic,
UoI_ElasticNet,
UoI_Poisson)
@pytest.mark.skipif(MPI is None, reason='MPI not installed.')
def test_variable_selection_lasso():
    """Test that UoI_Lasso recovers the true support under MPI."""
    X, y, w = make_regression(coef=True, random_state=1)
    lasso = UoI_Lasso(comm=MPI.COMM_WORLD)
    lasso.fit(X, y)
    true_coef = np.nonzero(w)[0]
    fit_coef = np.nonzero(lasso.coef_)[0]
    # Exact index equality is the meaningful check; the previous extra
    # assert_array_almost_equal_nulp call on the same integer index
    # arrays was redundant with it.
    assert_array_equal(true_coef, fit_coef)
@pytest.mark.skipif(MPI is None, reason='MPI not installed.')
def test_variable_selection_enet():
    """Test that UoI_ElasticNet recovers the true support under MPI.

    (Docstring previously copy-pasted from the UoI_Lasso test.)
    """
    X, y, w = make_regression(coef=True, random_state=1)
    enet = UoI_ElasticNet(comm=MPI.COMM_WORLD)
    enet.fit(X, y)
    true_coef = np.nonzero(w)[0]
    fit_coef = np.nonzero(enet.coef_)[0]
    # Exact index equality suffices; the nulp comparison on the same
    # integer arrays was redundant.
    assert_array_equal(true_coef, fit_coef)
@pytest.mark.skipif(MPI is None, reason='MPI not installed.')
def test_l1logistic_binary():
    """Test that binary L1 Logistic runs in the UoI framework."""
    num_informative = 10
    X, y, w, b = make_classification(n_samples=200,
                                     random_state=6,
                                     n_informative=num_informative,
                                     n_features=20,
                                     w_scale=4.,
                                     include_intercept=True)
    model = UoI_L1Logistic(random_state=10, comm=MPI.COMM_WORLD).fit(X, y)
    # At least 70% of the coefficient supports must agree.
    support_match = (np.sign(abs(w)) == np.sign(abs(model.coef_))).mean()
    assert support_match >= .7
@pytest.mark.skipif(MPI is None, reason='MPI not installed.')
def test_l1logistic_multiclass():
    """Test that multiclass L1 Logistic runs in the UoI framework when all
    classes share a support."""
    num_features = 20
    num_informative = 10
    X, y, w, b = make_classification(n_samples=200,
                                     random_state=10,
                                     n_classes=5,
                                     n_informative=num_informative,
                                     n_features=num_features,
                                     shared_support=True,
                                     w_scale=4.)
    model = UoI_L1Logistic(comm=MPI.COMM_WORLD).fit(X, y)
    # At least 80% of the coefficient supports must agree.
    support_match = (np.sign(abs(w)) == np.sign(abs(model.coef_))).mean()
    assert support_match >= .8
@pytest.mark.skipif(MPI is None, reason='MPI not installed.')
def test_poisson():
    """Test that UoI_Poisson roughly recovers the support under MPI.

    (Docstring previously copy-pasted from the UoI_Lasso test.)
    """
    n_features = 20
    n_inf = 10
    X, y, w, b = make_poisson_regression(n_samples=200,
                                         n_features=n_features,
                                         n_informative=n_inf,
                                         random_state=10)
    poisson = UoI_Poisson(comm=MPI.COMM_WORLD)
    poisson.fit(X, y)
    # At least 60% of the coefficient supports must agree.
    assert (np.sign(abs(w)) == np.sign(abs(poisson.coef_))).mean() >= .6
|
__all__ = ['ev3dev', 'lego','mindsensors'] |
import os
import pickle
import sys
import time
import traceback
import numpy as np
if __name__ == '__main__':
    # insert this project in path
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
    import sys
    # Hard-coded developer workspaces; harmless no-ops on other machines.
    sys.path.insert(0, "/home/blankjul/workspace/pymop/")
    sys.path.insert(0, "/Users/julesy/workspace/pymop/")
    sys.path.insert(0, "/home/blankjul/workspace/pymoo/")
    sys.path.insert(0, "/Users/julesy/workspace/pymoo/")
    import pymop
    from pymoo.optimize import minimize
    # load the data for the experiments
    # argv[1]: pickled experiment definition, argv[2]: output path.
    fname = sys.argv[1]
    with open(fname, 'rb') as f:
        data = pickle.load(f)
    start_time = time.time()
    try:
        # Two payload formats: a pre-built algorithm object, or raw
        # args/kwargs destined for pymoo's minimize().
        if 'algorithm' in data:
            res = data['algorithm'].solve(data['problem'], data['termination'], seed=data['seed'])
            problem = data['problem']
        else:
            res = minimize(
                *data['args'],
                **data['kwargs']
            )
            problem = data['args'][0]
        elapsed = (time.time() - start_time)
        print(fname, "in --- %s seconds ---" % elapsed)
        # create directory if necessary
        out = sys.argv[2]
        os.makedirs(os.path.dirname(out), exist_ok=True)
        # if a feasible solution has been found
        """
        if res.F is not None:
            F = res.F
            M = F
            if problem.n_constr > 0:
                M = np.hstack([F, res.CV])
        # if no feasible solution was found
        else:
            F, CV = res.pop.get("F", "CV")
            best = np.argmin(CV)
            M = np.array([F[best], CV[best]])
        """
        # Persist objectives and constraint violations of the final population.
        M = np.column_stack([res.pop.get("F"), res.pop.get("CV")])
        np.savetxt(out, M)
    except Exception as e:
        # Best-effort runner: log the failure and exit cleanly so batch
        # jobs over many experiment files keep going.
        traceback.print_exc()
        print(e)
        print("Error: %s" % fname)
|
from checkmatelib import CheckmateClient, CheckmateException
from pyramid.httpexceptions import HTTPTemporaryRedirect
class ViaCheckmateClient(CheckmateClient):
    """Checkmate client pre-configured from Via's request settings."""

    def __init__(self, request):
        settings = request.registry.settings
        super().__init__(
            host=settings["checkmate_url"],
            api_key=settings["checkmate_api_key"],
        )
        self._request = request

    def raise_if_blocked(self, url):
        """Raise a redirect to Checkmate if the URL is blocked.

        This will sensibly apply all ignore reasons and other
        configuration for Checkmate.

        :param url: The URL to check
        :raise HTTPTemporaryRedirect: If the URL is blocked
        """
        settings = self._request.registry.settings
        try:
            verdict = self.check_url(
                url,
                allow_all=settings["checkmate_allow_all"],
                blocked_for=self._request.params.get("via.blocked_for"),
                ignore_reasons=settings["checkmate_ignore_reasons"],
            )
        except CheckmateException:
            # Fail open: if Checkmate itself errors, treat the URL as allowed.
            verdict = None

        if verdict:
            raise HTTPTemporaryRedirect(location=verdict.presentation_url)
|
from glob import glob
import os
import pandas as pd
import numpy as np
from pathlib import Path
import unittest
import shutil
import matplotlib.pyplot as plt
from clouds.preprocess import Preprocessor
from clouds.inference import Inference
from clouds.experiments import GeneralInferExperiment
from clouds.experiments.utils import load_config
from clouds.preprocess import make_mask
from utils import load_paths_dict, download_weights
class InferExperimentsTests(unittest.TestCase):
    """End-to-end test of the inference/submission pipeline.

    setUp preprocesses a small sample dataset and downloads model
    weights; tearDown removes everything that was created.
    """
    def setUp(self):
        """Preprocess the sample dataset and fetch the model weights."""
        try:
            self.img_names = [Path(fpath).name
                              for fpath in glob("resources/*.jpg")]
            self.rle_df = pd.read_csv("resources/train_sample.csv")
        except FileNotFoundError:
            raise Exception("Please make sure to run tests within the",
                            "test directory.")
        preprocess_yml_path = "resources/configs/create_dset.yml"
        preprocess_config = load_config(preprocess_yml_path)
        self.paths_dict = load_paths_dict(preprocess_config)
        # Creates the directory if it does not already exist
        for dir_ in ["train_out", "test_out", "masks_out"]:
            # so it doesn't create the test dir (None)
            dir_of_interest = self.paths_dict[dir_]
            if dir_of_interest is not None:
                if not os.path.isdir(dir_of_interest):
                    os.mkdir(dir_of_interest)
        # Sets up the preprocessed sample to load from
        self.out_shape_cv2 = (576, 384)  # cv2 convention: (width, height)
        self.preprocessor = Preprocessor(self.rle_df, self.paths_dict,
                                         self.out_shape_cv2)
        self.preprocessor.execute_all()
        download_weights()
    def tearDown(self):
        """Delete the preprocessed files, weights, and submission output."""
        shutil.rmtree(self.paths_dict["train_out"])
        shutil.rmtree(self.paths_dict["masks_out"])
        os.remove("fpn_resnet34_seg1_seed350_mvp_best.pth")
        os.remove("submission.csv")
    def test_Inference_create_sub(self):
        """Testing that Inference.create_sub() runs smoothly.
        Not testing that it works properly.
        """
        # Seg only for now
        yml_path = "resources/configs/create_sub.yml"
        experiment_config = load_config(yml_path)
        exp = GeneralInferExperiment(experiment_config)
        infer = Inference(exp.model, exp.loaders["test"],
                          **experiment_config["infer_params"])
        out_df = infer.create_sub(sub=exp.sample_sub)
        print(out_df.head())
        print(out_df["EncodedPixels"])
        self.assertTrue(isinstance(out_df, pd.DataFrame))
        # Test how well it actually performed
        make_mask_kwargs = {
            "img_name": self.img_names[0],
            "out_shape_cv2": (525, 350),
            "num_classes": 4
        }
        sub_df = pd.read_csv("submission.csv")
        # Prediction masks come from the submission at reduced resolution;
        # ground truth comes from the original RLE dataframe at full size.
        pred = make_mask(sub_df, shape=(350, 525),
                         **make_mask_kwargs)
        actual = make_mask(self.rle_df, shape=(1400, 2100),
                           **make_mask_kwargs)
        mean_dice = mean_dice_coef(actual[None], pred[None])
        img_name = make_mask_kwargs["img_name"]
        print(f"Mean Dice for {img_name}: {mean_dice}")
        # Loose sanity band rather than an exact score.
        self.assertTrue(mean_dice > 0.5 and mean_dice < 0.9)
        # visual prediction check
        label_names = ["Fish", "Flower", "Gravel", "Sugar"]
        for channel, label in zip(range(4), label_names):
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 12))
            ax1.imshow(actual[:, :, channel])
            ax1.set_title(f"{label} Mask")
            ax2.imshow(pred[:, :, channel])
            ax2.set_title(f"{label} Prediction")
            plt.show()
def single_dice_coef(y_true, y_pred_bin):
    """Dice coefficient for one (height, width) mask pair.

    Two empty masks are defined as a perfect match (returns 1).
    """
    true_sum = np.sum(y_true)
    pred_sum = np.sum(y_pred_bin)
    if (true_sum == 0) and (pred_sum == 0):
        return 1
    overlap = np.sum(y_true * y_pred_bin)
    return (2 * overlap) / (true_sum + pred_sum)
def mean_dice_coef(y_true, y_pred_bin):
    """Mean Dice over a (n_samples, height, width, n_channels) batch."""
    n_samples = y_true.shape[0]
    n_channels = y_true.shape[-1]
    total = 0.
    for sample in range(n_samples):
        for channel in range(n_channels):
            dice = single_dice_coef(y_true[sample, :, :, channel],
                                    y_pred_bin[sample, :, :, channel])
            # Accumulate the already-normalised contribution, exactly as
            # the running average was built originally.
            total += dice / (n_channels * n_samples)
    return total
unittest.main(argv=[''], verbosity=2, exit=False)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import firewall
from neutron_lib.tests.unit.api.definitions import base
class FirewallDefinitionTestCase(base.DefinitionBaseTestCase):
    """Sanity checks for the firewall API extension definition.

    The base test case inspects the extension module and verifies it
    declares exactly these resources and attributes.
    """
    extension_module = firewall
    extension_resources = ('firewalls', 'firewall_policies', 'firewall_rules')
    extension_attributes = ('action', 'admin_state_up', 'audited',
                            'destination_ip_address', 'destination_port',
                            'enabled', 'firewall_policy_id', 'firewall_rules',
                            'ip_version', 'position', 'protocol',
                            'source_ip_address', 'source_port')
|
# stdlib
import os
from io import StringIO
# 3rd party
import coverage # type: ignore
import pytest
from coincidence import only_version
from coincidence.regressions import check_file_regression
from coverage.python import PythonParser # type: ignore
from domdf_python_tools.paths import PathPlus
from pytest_regressions.file_regression import FileRegressionFixture
# this package
import coverage_pyver_pragma
@pytest.mark.parametrize(
    "version",
    [
        pytest.param("3.6", marks=only_version(3.6, "Output differs on each version.")),
        pytest.param("3.7", marks=only_version(3.7, "Output differs on each version.")),
        pytest.param("3.8", marks=only_version(3.8, "Output differs on each version.")),
        pytest.param("3.9", marks=only_version(3.9, "Output differs on each version.")),
        pytest.param("3.10", marks=only_version("3.10", "Output differs on each version.")),
    ]
)
def test_plugin(tmp_pathplus: PathPlus, file_regression: FileRegressionFixture, version):
    """Regression-test the coverage report produced with the plugin active.

    Only the case matching the running interpreter executes; the others
    are skipped by the only_version marks.
    """
    coverage_pyver_pragma.coverage_init()
    # The plugin works by monkeypatching PythonParser.lines_matching.
    assert PythonParser.lines_matching is coverage_pyver_pragma.PythonParser.lines_matching
    cov = coverage.Coverage()
    cov.start()
    # this package
    import tests.demo_code
    cov.stop()
    cov.save()
    output = StringIO()
    cov.report(morfs=[tests.demo_code.__file__], file=output)
    # cov.html_report(morfs=[tests.demo_code.__file__])
    cov.erase()
    # Normalise the module path and path separators so the regression
    # file compares equal on every platform.
    buf = output.getvalue().replace(tests.demo_code.__file__, "demo_code.py")
    buf = buf.replace(os.path.sep, os.path.altsep or os.path.sep)
    check_file_regression(buf, file_regression)
|
# This is a test.
|
import errno
import functools
import numpy as np
import os
from osgeo import gdal, gdalnumeric
class RasterFile(object):
    """Lazy wrapper around a GDAL raster dataset.

    Derived values (geo transform, extent, coordinate meshgrid, slope,
    aspect) are computed on first access and cached on the instance.
    """
    def __init__(self, filename, band_number=1):
        # The `file` setter opens the dataset immediately.
        self.file = str(filename)
        self._band_number = band_number
        # Lazily-populated caches for the derived properties below.
        self._geotransform = None
        self._extent = None
        self._xy_meshgrid = None
        self._mad = None
        self._slope = None
        self._aspect = None
    @property
    def file(self):
        # Holds the opened gdal.Dataset (the setter takes a path string).
        return self._file
    @file.setter
    def file(self, filename):
        """Open *filename* as a GDAL dataset; raise if it does not exist."""
        if os.path.exists(filename):
            self._file = gdal.Open(filename)
        else:
            raise FileNotFoundError(
                errno.ENOENT, os.strerror(errno.ENOENT), filename
            )
    @property
    def band_number(self):
        # Default band read by band_values().
        return self._band_number
    @band_number.setter
    def band_number(self, band_number):
        self._band_number = band_number
    @property
    def geo_transform(self):
        # GDAL 6-tuple: (x origin, x res, x skew, y origin, y skew, y res).
        if self._geotransform is None:
            self._geotransform = self.file.GetGeoTransform()
        return self._geotransform
    @property
    def x_top_left(self):
        return self.geo_transform[0]
    @property
    def y_top_left(self):
        return self.geo_transform[3]
    @property
    def x_resolution(self):
        return self.geo_transform[1]
    @property
    def y_resolution(self):
        # Typically negative (rows increase downward in map space).
        return self.geo_transform[5]
    @property
    def extent(self):
        """(x_min, x_max, y_max, y_min)-style bounds derived from the
        geo transform and raster size."""
        if self._extent is None:
            x_bottom_right = \
                self.x_top_left + self.file.RasterXSize * self.x_resolution
            y_bottom_right = \
                self.y_top_left + self.file.RasterYSize * self.y_resolution
            self._extent = (
                self.x_top_left, x_bottom_right,
                self.y_top_left, y_bottom_right
            )
        return self._extent
    @property
    def xy_meshgrid(self):
        """
        Upper Left coordinate for each cell

        NOTE(review): np.half (float16) keeps memory low but has ~3
        decimal digits of precision -- confirm that is acceptable for
        these coordinate values.

        :return: Numpy meshgrid
        """
        if self._xy_meshgrid is None:
            x_size = self.file.RasterXSize
            y_size = self.file.RasterYSize
            self._xy_meshgrid = np.meshgrid(
                np.arange(
                    self.x_top_left,
                    self.x_top_left + x_size * self.x_resolution,
                    self.x_resolution,
                    dtype=np.half,
                ),
                np.arange(
                    self.y_top_left,
                    self.y_top_left + y_size * self.y_resolution,
                    self.y_resolution,
                    dtype=np.half,
                )
            )
        return self._xy_meshgrid
    def band_values(self, **kwargs):
        """
        Method to read band from arguments or from initialized raster.
        Values equal to the band's NoDataValue are masked out.
        :param kwargs:
            'band_number': band_number to read instead of the one given with
                           the initialize call.
        :return: Numpy masked array
        """
        band_number = kwargs.get('band_number', self.band_number)
        band = self.file.GetRasterBand(band_number)
        values = np.ma.masked_values(
            gdalnumeric.BandReadAsArray(band),
            band.GetNoDataValue(),
            copy=False
        )
        # Drop the band reference promptly; GDAL objects hold C resources.
        del band
        return values
    def get_raster_attribute(self, attribute, **kwargs):
        """Run gdal.DEMProcessing (e.g. 'slope', 'aspect', 'hillshade')
        in-memory and return the masked result as np.half."""
        raster = gdal.DEMProcessing(
            '', self.file, attribute, format='MEM', **kwargs
        )
        raster_band = raster.GetRasterBand(1)
        raster_values = np.ma.masked_values(
            gdalnumeric.BandReadAsArray(raster_band),
            raster_band.GetNoDataValue(),
            copy=False
        )
        del raster
        del raster_band
        # NOTE(review): .view(dtype=np.half) reinterprets the bytes rather
        # than converting values -- confirm the source dtype makes this
        # intentional.
        return raster_values.view(dtype=np.half)
    # NOTE(review): lru_cache on an instance method keys on `self` and
    # keeps every instance alive for the cache's lifetime
    # (flake8-bugbear B019); consider a per-instance cache instead.
    @functools.lru_cache(16)
    def hill_shade(self, **kwargs):
        return self.get_raster_attribute('hillshade', **kwargs)
    @property
    def slope(self):
        if self._slope is None:
            self._slope = self.get_raster_attribute('slope')
        return self._slope
    @property
    def aspect(self):
        if self._aspect is None:
            self._aspect = self.get_raster_attribute('aspect')
        return self._aspect
    def join_masks(self, attribute, other):
        """
        Extend the numpy mask for given attribute with mask from given other
        masked numpy array.
        Note: This will *permanently* change the mask.
        :param attribute: name of property to change the mask
        :param other: Masked numpy array to extend the mask with
        """
        attr = getattr(self, attribute)
        attr.mask = np.ma.mask_or(attr.mask, other.mask)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2019-07-14 12:40
from __future__ import unicode_literals
import bluebottle.utils.fields
from decimal import Decimal
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: re-declares the MoneyField definitions of
    # funding.amount_matching and funding.target (EUR-only, 2 decimal
    # places, default 0.0). Body intentionally left byte-stable.
    dependencies = [
        ('funding', '0013_auto_20190711_0927'),
    ]
    operations = [
        migrations.AlterField(
            model_name='funding',
            name='amount_matching',
            field=bluebottle.utils.fields.MoneyField(currency_choices=[(b'EUR', b'Euro')], decimal_places=2, default=Decimal('0.0'), max_digits=12),
        ),
        migrations.AlterField(
            model_name='funding',
            name='target',
            field=bluebottle.utils.fields.MoneyField(currency_choices=[(b'EUR', b'Euro')], decimal_places=2, default=Decimal('0.0'), max_digits=12),
        )
    ]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from netests.workers.device_ssh import DeviceSSH
from netests.exceptions.netests_exceptions import NetestsFunctionNotPossible
from netests.converters.bgp.juniper.ssh import _juniper_bgp_ssh_converter
from netests.converters.facts.juniper.ssh import _juniper_facts_ssh_converter
from netests.converters.lldp.juniper.ssh import _juniper_lldp_ssh_converter
from netests.converters.ospf.juniper.ssh import _juniper_ospf_ssh_converter
from netests.converters.vrf.juniper.ssh import _juniper_vrf_ssh_converter
from netests.constants import (
BGP_SESSIONS_HOST_KEY,
FACTS_DATA_HOST_KEY,
LLDP_DATA_HOST_KEY,
OSPF_SESSIONS_HOST_KEY,
VRF_DATA_KEY,
JUNOS_GET_BGP,
JUNOS_GET_BGP_RID,
JUNOS_GET_BGP_VRF,
JUNOS_GET_BGP_VRF_RID,
JUNOS_GET_FACTS,
JUNOS_GET_INT,
JUNOS_GET_MEMORY,
JUNOS_GET_CONFIG_SYSTEM,
JUNOS_GET_SERIAL,
JUNOS_GET_LLDP,
JUNOS_GET_OSPF_NEI,
JUNOS_GET_OSPF_RID,
JUNOS_GET_OSPF_NEI_VRF,
JUNOS_GET_OSPF_RID_VRF,
JUNOS_GET_VRF_DETAIL
)
class BGPJuniperSSH(DeviceSSH):
    """Collect BGP session data (global and per-VRF) from Juniper over SSH."""

    def __init__(self, task, options=None):
        # `options=None` avoids sharing a mutable default dict across
        # instances; callers see identical behavior.
        super().__init__(
            task=task,
            commands={
                "default_vrf": {
                    "data": JUNOS_GET_BGP,
                    "rid": JUNOS_GET_BGP_RID
                },
                "vrf": {
                    "data": JUNOS_GET_BGP_VRF,
                    "rid": JUNOS_GET_BGP_VRF_RID
                },
            },
            vrf_loop=True,
            converter=_juniper_bgp_ssh_converter,
            key_store=BGP_SESSIONS_HOST_KEY,
            options=options if options is not None else {}
        )
class CDPJuniperSSH(DeviceSSH):
    """CDP is a Cisco-proprietary protocol; Juniper devices cannot serve it."""

    def __init__(self, task, options=None):
        # Mutable default dict replaced by None; constructor always raises
        # by design so callers learn CDP is unsupported on Juniper.
        raise NetestsFunctionNotPossible(
            "Juniper doesn't support CDP"
        )
class FactsJuniperSSH(DeviceSSH):
    """Collect device facts (system, interfaces, memory, config, serial)."""

    def __init__(self, task, options=None):
        # `options=None` avoids sharing a mutable default dict across
        # instances; callers see identical behavior.
        super().__init__(
            task=task,
            commands={
                "default_vrf": {
                    "get_infos_sys": JUNOS_GET_FACTS,
                    "get_infos_int": JUNOS_GET_INT,
                    "get_infos_memory": JUNOS_GET_MEMORY,
                    "get_infos_config": JUNOS_GET_CONFIG_SYSTEM,
                    "get_infos_serial": JUNOS_GET_SERIAL
                }
            },
            vrf_loop=False,
            converter=_juniper_facts_ssh_converter,
            key_store=FACTS_DATA_HOST_KEY,
            options=options if options is not None else {}
        )
class LLDPJuniperSSH(DeviceSSH):
    """Collect LLDP neighbor data from Juniper devices over SSH."""

    def __init__(self, task, options=None):
        # `options=None` avoids sharing a mutable default dict across
        # instances; callers see identical behavior.
        super().__init__(
            task=task,
            commands={
                "default_vrf": {
                    "no_key": JUNOS_GET_LLDP
                }
            },
            vrf_loop=False,
            converter=_juniper_lldp_ssh_converter,
            key_store=LLDP_DATA_HOST_KEY,
            options=options if options is not None else {}
        )
class OSPFJuniperSSH(DeviceSSH):
    """Collect OSPF neighbor data (global and per-VRF) from Juniper."""

    def __init__(self, task, options=None):
        # `options=None` avoids sharing a mutable default dict across
        # instances; callers see identical behavior.
        super().__init__(
            task=task,
            commands={
                "default_vrf": {
                    "data": JUNOS_GET_OSPF_NEI,
                    "rid": JUNOS_GET_OSPF_RID
                },
                "vrf": {
                    "data": JUNOS_GET_OSPF_NEI_VRF,
                    "rid": JUNOS_GET_OSPF_RID_VRF
                },
            },
            vrf_loop=True,
            converter=_juniper_ospf_ssh_converter,
            key_store=OSPF_SESSIONS_HOST_KEY,
            options=options if options is not None else {}
        )
class VRFJuniperSSH(DeviceSSH):
    """Collect VRF (routing-instance) details from Juniper devices."""

    def __init__(self, task, options=None):
        # `options=None` avoids sharing a mutable default dict across
        # instances; callers see identical behavior.
        super().__init__(
            task=task,
            commands={
                "default_vrf": {
                    "no_key": JUNOS_GET_VRF_DETAIL
                }
            },
            vrf_loop=False,
            converter=_juniper_vrf_ssh_converter,
            key_store=VRF_DATA_KEY,
            options=options if options is not None else {}
        )
|
from . import jalali
from django.utils import timezone
def converter(time):
    """Render *time* as a Jalali (Persian) date string with English month names."""
    jmonth=["January","February","March","April","May","June","July","August","September","October","November","December"]
    localized = timezone.localtime(time)
    gregorian_str = f"{localized.year},{localized.month},{localized.day}"
    persian = list(jalali.Gregorian(gregorian_str).persian_tuple())
    month_number = persian[1]
    # Replace the numeric month (1-based) with its English name; any
    # value outside 1..12 is left untouched, as before.
    if month_number in range(1, len(jmonth) + 1):
        persian[1] = jmonth[month_number - 1]
    return f"{persian[2]} {persian[1]} {persian[0]}, Hour {localized.hour}:{localized.minute}"
|
import numpy as np
__all__ = [
"calcTisserandParameter"
]
# This code generates the dictionary of semi-major axes for the
# third body needed for the Tisserand parameter
#
#
# from astropy.time import Time
# from thor.utils import getHorizonsElements
#
# ids = ["199", "299", "399", "499", "599", "699", "799", "899"]
# elements = getHorizonsElements(ids, times, id_type="majorbody")
#
# MAJOR_BODIES = {}
# for i, r in elements[["targetname", "a"]].iterrows():
# body_name = r["targetname"].split(" ")[0].lower()
# MAJOR_BODIES[body_name] = r["a"]
#
# Semi-major axes (au) of the major planets, keyed by lowercase name.
# Values were generated once from JPL Horizons (see comment block above).
MAJOR_BODIES = {
    'mercury': 0.3870970330236769,
    'venus': 0.723341974974844,
    'earth': 0.9997889954736553,
    'mars': 1.523803685638066,
    'jupiter': 5.203719697535582,
    'saturn': 9.579110220472034,
    'uranus': 19.18646168457971,
    'neptune': 30.22486701698071
}
def calcTisserandParameter(a, e, i, third_body="jupiter"):
    """Compute Tisserand's parameter with respect to a chosen planet.

    Useful to flag comet candidates: with respect to Jupiter, T > 3 is
    typical for asteroids, 2 < T < 3 for Jupiter-family comets, and
    T < 2 for Damocloids.

    Parameters
    ----------
    a : float or `~numpy.ndarray` (N)
        Semi-major axis in au.
    e : float or `~numpy.ndarray` (N)
        Eccentricity.
    i : float or `~numpy.ndarray` (N)
        Inclination in degrees.
    third_body : str
        Planet name (key of MAJOR_BODIES) the parameter is computed
        against.

    Returns
    -------
    Tp : float or `~numpy.ndarray` (N)
        Tisserand's parameter.

    Raises
    ------
    ValueError
        If ``third_body`` is not a known planet.
    """
    major_bodies = MAJOR_BODIES.keys()
    if third_body not in major_bodies:
        raise ValueError(
            f"third_body should be one of {','.join(major_bodies)}"
        )
    a_planet = MAJOR_BODIES[third_body]
    inclination = np.radians(i)
    return (a_planet / a
            + 2 * np.cos(inclination) * np.sqrt(a / a_planet * (1 - e**2)))
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import getpass
import os
import db_utils as dbutils
import fixture_utils as fixutils
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
def before_all(context):
    """One-time suite setup: pin terminal env, parse VERTICA_URL and open
    the shared test connection."""
    # Deterministic terminal geometry and a non-interactive pager so the
    # CLI output seen by the tests is stable.
    os.environ['LINES'] = "100"
    os.environ['COLUMNS'] = "100"
    os.environ['PAGER'] = 'cat'
    # VERTICA_URL points at the Vertica database used for testing.
    raw_url = os.getenv('VERTICA_URL')
    if not raw_url:
        raise Exception('You must configure VERTICA_URL environment variable')
    parsed = urlparse(raw_url)
    context.conf = {
        'host': parsed.hostname or 'localhost',
        'user': parsed.username or getpass.getuser(),
        'pass': parsed.password or '',
        'port': int(parsed.port or 5433),
        'dbname': parsed.path[1:],  # drop the leading slash
    }
    context.exit_sent = False
    context.fixture_data = fixutils.read_fixture_files()
    context.cn = dbutils.create_cn(
        context.conf['host'], context.conf['pass'], context.conf['user'],
        context.conf['dbname'], context.conf['port'])
def after_scenario(context, _):
    """Per-scenario teardown: drop the test schema, stop the CLI process
    and remove any temp file the scenario created."""
    if hasattr(context, 'cli') and not context.exit_sent:
        # Leave the database clean for the next scenario, wait for the
        # prompt to confirm the drop completed, then terminate nicely.
        context.cli.sendline('DROP SCHEMA IF EXISTS vcli_test CASCADE;')
        context.cli.expect_exact('%s=>' % context.conf['dbname'], timeout=2)
        context.cli.terminate()
    if hasattr(context, 'temp_filename') and \
            os.path.exists(context.temp_filename):
        os.remove(context.temp_filename)
|
import pymysql
from config import host, port, user, password, db, sessions_dir
import datetime
import os
def get_db():
    """Open and return a new pymysql connection using settings from config.

    On failure this prints the error and implicitly returns None; callers
    do not check for None, so a DB outage surfaces later as an
    AttributeError on the missing connection.
    """
    try:
        # return pymysql.connect(host=host, port=port, user=user, password=password, db=db)
        return pymysql.connect(host=host, port=port, user=user, password=password, db=db, use_unicode=True,
                               charset="utf8")
    except Exception as e:
        print("数据库连接失败")
        print(e)
def get_phone(category):
    """Fetch one unprocessed phone (status = -1) of the given category.

    :param category: category column value to filter on.
    :return: (phone, id) tuple, or (None, None) when none is available.
    """
    conn = get_db()
    cursor = conn.cursor()
    sql = "select phone, id from phone where status=-1 and category=%s limit 1"
    cursor.execute(sql, (category,))
    row = cursor.fetchone()
    conn.close()
    if row is None:
        return None, None
    return row[0], row[1]
def change_status(phone_id, status):
    """Update the status column of one phone row after processing.

    :param phone_id: primary key of the phone row.
    :param status: new status value.
    """
    conn = get_db()
    cursor = conn.cursor()
    try:
        cursor.execute("update phone set status=%s where id = %s",
                       (status, phone_id))
        conn.commit()
    except Exception as e:
        print(e)
        conn.rollback()
    conn.close()
def insert_user_info(user_info):
    """Insert (or upsert) a single Telegram user record into `user`.

    With a known phone the row is upserted so an existing user_id gets its
    phone refreshed; without one a plain insert-ignore leaves existing
    rows untouched.
    """
    record = (
        user_info["phone"],
        user_info["id"],
        user_info["first_name"],
        user_info["last_name"],
        user_info["username"],
        1 if user_info["bot"] else 0,
    )
    if record[0] is not None:
        sql = "insert into user (phone, user_id, first_name, last_name, username, bot) values (%s,%s,%s,%s,%s,%s) on duplicate key update phone=values(phone)"
    else:
        sql = "insert ignore into user (phone, user_id, first_name, last_name, username, bot) values (%s,%s,%s,%s,%s,%s)"
    conn = get_db()
    cursor = conn.cursor()
    try:
        cursor.execute(sql, record)
        conn.commit()
        print(str(record[1]) + '插入user信息成功')
    except Exception as e:
        print(e)
        conn.rollback()
    conn.close()
def insert_many_user_info(user_info_list):
    """Bulk insert-ignore a list of Telegram user dicts into `user`."""
    rows = [
        (u["phone"], u["id"], u["first_name"], u["last_name"],
         u["username"], 1 if u["bot"] else 0)
        for u in user_info_list
    ]
    conn = get_db()
    cursor = conn.cursor()
    sql = "insert ignore into user (phone, user_id, first_name, last_name, username, bot) values (%s, %s,%s,%s,%s,%s)"
    try:
        cursor.executemany(sql, rows)
        conn.commit()
        print('插入user信息成功')
    except Exception as e:
        print(e)
        conn.rollback()
    conn.close()
def insert_check_user_info(user_info, phone, by_phone):
    '''
    Record the probe result for one phone number into check_user
    (skipped when the phone is already recorded).

    :param user_info: Telegram user dict (id, names, username, bot, status)
    :param phone: phone number that was checked
    :param by_phone: the logged-in phone that performed the check
    '''
    phone = phone  # no-op self-assignment kept as-is
    user_id = user_info["id"]
    first_name = user_info["first_name"]
    last_name = user_info["last_name"]
    username = user_info["username"]
    bot = 1 if user_info["bot"] else 0
    status = None
    # Derive a "last seen" timestamp: was_online for offline users,
    # expires for currently online users.
    # NOTE(review): assumes both nested keys exist whenever status is
    # truthy — confirm against the client code that builds user_info.
    if user_info["status"]:
        if user_info["status"]["was_online"]:
            status = user_info["status"]["was_online"]
        elif user_info["status"]["expires"]:
            status = user_info["status"]["expires"]
    conn = get_db()
    cursor = conn.cursor()
    sql = "select * from check_user where phone=%s"
    cursor.execute(sql, (phone,))
    user = cursor.fetchone()
    if user:
        print("该手机号已存在...")
    else:
        sql = "insert into check_user (phone, user_id, first_name, last_name, username, status, bot, by_phone, by_time) values (%s,%s,%s,%s,%s,%s,%s,%s,%s)"
        try:
            cursor.execute(sql, (
                phone, user_id, first_name, last_name, username, status, bot, by_phone, datetime.datetime.now()))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def selcet_the_last_check_user(start):
    """Return the user_id of the check_user row with the highest phone.

    :param start: unused; kept for backward compatibility with callers.
    :return: user_id (second column) of the newest row, or None if empty.
    """
    conn = get_db()
    cursor = conn.cursor()
    sql = "select * from check_user order by phone desc limit 1"
    cursor.execute(sql)
    user = cursor.fetchone()
    conn.close()  # fix: the connection was previously leaked
    if user:
        return user[1]
    return None
def get_the_count_of_check_user():
    """Return the number of rows in check_user.

    :return: int row count.
    """
    conn = get_db()
    cursor = conn.cursor()
    sql = "select count(phone) from check_user"
    cursor.execute(sql)
    count = cursor.fetchone()
    conn.close()  # fix: the connection was previously leaked
    return count[0]
def get_the_phone_from_check_user_by_index(index):
    """Return the phone at offset ``index`` in check_user.

    :param index: zero-based row offset (LIMIT index, 1).
    :return: phone string, or None when the offset is out of range.
    """
    conn = get_db()
    cursor = conn.cursor()
    sql = "select phone from check_user limit %s, 1"
    cursor.execute(sql, (index,))
    row = cursor.fetchone()
    conn.close()  # fix: the connection was previously leaked
    # Robustness fix: an out-of-range offset used to crash with TypeError
    # on `None[0]`; now it returns None.
    return row[0] if row is not None else None
def get_info_from_check_user_by_phone(phone):
    """Return the full check_user row for ``phone`` (or None if absent)."""
    conn = get_db()
    cursor = conn.cursor()
    sql = "select * from check_user where phone=%s"
    cursor.execute(sql, (phone,))
    info = cursor.fetchone()
    conn.close()  # fix: the connection was previously leaked
    return info
def insert_authorization(authorization_info, user_id):
    """Insert or refresh one Telegram authorization (active session) row.

    Three cases on the ``hash`` key:
      * row exists and hash != '0'  -> already stored, skip;
      * row exists and hash == '0'  -> current session placeholder, update
        the single hash='0' row in place;
      * no row                      -> insert a new one.

    :param authorization_info: dict with hash/device/platform/date/ip fields
    :param user_id: owner user id the row is attributed to
    """
    # NOTE(review): `hash` shadows the builtin of the same name; kept as-is.
    hash = str(authorization_info["hash"])
    device_model = authorization_info["device_model"]
    platform = authorization_info["platform"]
    system_version = authorization_info["system_version"]
    app_name = authorization_info["app_name"]
    app_version = authorization_info["app_version"]
    date_created = authorization_info["date_created"]
    date_active = authorization_info["date_active"]
    ip = authorization_info["ip"]
    country = authorization_info["country"]
    region = authorization_info["region"]
    official_app = 1 if authorization_info["official_app"] else 0
    conn = get_db()
    cursor = conn.cursor()
    sql = "select * from authorization where hash=%s"
    cursor.execute(sql, (hash,))
    authorization = cursor.fetchone()
    if authorization and hash != '0':
        print("该authorization已存在...")
    elif authorization and hash == '0':
        print("更新该authorization...")
        # Dates arrive as strings; str_to_date converts them server-side.
        sql = "update authorization set user_id=%s, hash=%s, device_model=%s, platform=%s, system_version=%s, " \
              "app_name=%s, app_version=%s, date_created=str_to_date(%s,'%%Y-%%m-%%d %%H:%%i:%%s'), " \
              "date_active=str_to_date(%s,'%%Y-%%m-%%d %%H:%%i:%%s'), ip=%s, country=%s, region=%s, official_app=%s " \
              "where hash='0' "
        try:
            cursor.execute(sql, (
                user_id, hash, device_model, platform, system_version, app_name, app_version, date_created, date_active,
                ip, country, region, official_app))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    else:
        sql = "insert into authorization (user_id, hash, device_model, platform, system_version, app_name, " \
              "app_version, date_created, date_active, ip, country, region, official_app) values (%s, %s, %s, %s, %s, " \
              "%s, %s, str_to_date(%s,'%%Y-%%m-%%d %%H:%%i:%%s'), str_to_date(%s,'%%Y-%%m-%%d %%H:%%i:%%s'), %s, %s, " \
              "%s, %s) "
        try:
            cursor.execute(sql, (
                user_id, hash, device_model, platform, system_version, app_name, app_version, date_created, date_active,
                ip, country, region, official_app))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def insert_message(message_info, user_id):
    """Insert one message row unless the same (message_id, from_id, to_id)
    combination is already stored."""
    msg_id = message_info["id"]
    body = message_info["message"]
    sent_at = message_info["date"]
    sender = message_info["from_id"]
    receiver = message_info["to_id"]
    conn = get_db()
    cursor = conn.cursor()
    cursor.execute(
        "select * from message where message_id=%s and from_id=%s and to_id=%s",
        (msg_id, sender, receiver))
    if cursor.fetchone():
        print("该message已存在...")
    else:
        sql = "insert into message (user_id, message_id, message, date, from_id, to_id) values (%s, %s, %s, str_to_date(%s," \
              "'%%Y-%%m-%%d %%H:%%i:%%s'), %s, %s) "
        try:
            cursor.execute(sql, (user_id, msg_id, body, sent_at, sender, receiver))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def insert_contact(user_id, contact_user_id):
    """Record that ``user_id`` has ``contact_user_id`` in their contacts,
    skipping pairs that already exist."""
    conn = get_db()
    cursor = conn.cursor()
    cursor.execute(
        "select * from contact where user_id=%s and contact_user_id=%s",
        (user_id, contact_user_id))
    if cursor.fetchone():
        print("该contact已存在...")
    else:
        try:
            sql = "insert into contact (user_id, contact_user_id) values (%s, %s)"
            cursor.execute(sql, (user_id, contact_user_id))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def insert_channel(channel_info, user_id, num):
    """Insert one channel row (id, title, username, member count) when it
    is not already stored."""
    cid = channel_info["id"]
    conn = get_db()
    cursor = conn.cursor()
    cursor.execute("select * from channel where channel_id=%s", (cid,))
    if cursor.fetchone():
        print("该channel已存在...")
    else:
        try:
            sql = "insert into channel (user_id, channel_id, title, username, participants_count) values (%s, %s, %s, %s, %s)"
            cursor.execute(
                sql,
                (user_id, cid, channel_info["title"],
                 channel_info["username"], num))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def insert_channel_user(channel_id, user_id):
    """Link ``user_id`` to ``channel_id`` in channel_user unless the link
    already exists."""
    conn = get_db()
    cursor = conn.cursor()
    cursor.execute(
        "select * from channel_user where channel_id=%s and user_id=%s",
        (channel_id, user_id))
    if cursor.fetchone():
        print("该channel_user已存在...")
    else:
        try:
            sql = "insert into channel_user (channel_id, user_id) values (%s, %s)"
            cursor.execute(sql, (channel_id, user_id))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def insert_channel_many_user(channel_id, user_list):
    """Bulk link a list of user dicts to ``channel_id`` (duplicates are
    ignored by the database)."""
    rows = [(channel_id, member["id"]) for member in user_list]
    conn = get_db()
    cursor = conn.cursor()
    try:
        sql = "insert ignore into channel_user (channel_id, user_id) values (%s, %s)"
        cursor.executemany(sql, rows)
        conn.commit()
    except Exception as e:
        print(e)
        conn.rollback()
    conn.close()
def insert_group(group_info, user_id, num):
    """Insert one group row (id, title, member count) when it is not
    already stored."""
    gid = group_info["id"]
    conn = get_db()
    cursor = conn.cursor()
    cursor.execute("select * from group_ where group_id=%s", (gid,))
    if cursor.fetchone():
        print("该group已存在...")
    else:
        try:
            sql = "insert into group_ (user_id, group_id, title, participants_count) values (%s, %s, %s, %s)"
            cursor.execute(sql, (user_id, gid, group_info["title"], num))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def insert_group_user(group_id, user_id):
    """Link ``user_id`` to ``group_id`` in group_user unless it exists.

    Fix: the "already exists" message previously said "channel_user"
    (copy-paste from insert_channel_user); it now names the right table.
    """
    conn = get_db()
    cursor = conn.cursor()
    sql = "select * from group_user where group_id=%s and user_id=%s"
    cursor.execute(sql, (group_id, user_id))
    relationship = cursor.fetchone()
    if relationship:
        print("该group_user已存在...")
    else:
        try:
            sql = "insert into group_user (group_id, user_id) values (%s, %s)"
            cursor.execute(sql, (group_id, user_id))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def insert_group_many_user(group_id, user_list):
    """Bulk link a list of user dicts to ``group_id`` (duplicates are
    ignored by the database)."""
    rows = [(group_id, member["id"]) for member in user_list]
    conn = get_db()
    cursor = conn.cursor()
    try:
        sql = "insert ignore into group_user (group_id, user_id) values (%s, %s)"
        cursor.executemany(sql, rows)
        conn.commit()
    except Exception as e:
        print(e)
        conn.rollback()
    conn.close()
def insert_phone(phone, origin_ip, destination_ip):
    """Register a phone number with its proxy ips unless already present.

    Fixes: the connection is now always closed (it used to leak), and the
    query parameter is passed as a real one-tuple instead of a bare value.
    """
    conn = get_db()
    cursor = conn.cursor()
    sql = "select * from phone where phone = %s"
    cursor.execute(sql, (phone,))
    phone_existed = cursor.fetchone()
    if phone_existed:
        print('该手机号已经在数据库中...')
    else:
        try:
            sql = "insert into phone (phone, origin_ip, destination_ip) values (%s, %s, %s)"
            cursor.execute(sql, (phone, origin_ip, destination_ip))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def insert_session(phone, session):
    """Store a session string for ``phone`` unless one already exists.

    Fixes: the connection is now always closed (it used to leak), and the
    query parameter is passed as a real one-tuple.
    """
    conn = get_db()
    cursor = conn.cursor()
    sql = "select * from login_phone where phone = %s and session is not null"
    cursor.execute(sql, (phone,))
    phone_existed = cursor.fetchone()
    if phone_existed:
        print('该手机号已经在数据库中...')
    else:
        try:
            sql = "insert into login_phone (phone, session, time) values (%s, %s, %s)"
            # time is backdated to 2019-01-01 — presumably so the session
            # immediately reads as long-unused; confirm against callers.
            cursor.execute(sql, (phone, session, datetime.datetime(2019, 1, 1)))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def insert_session_platform(phone, session):
    """Store a platform session string for ``phone`` unless one exists.

    Fixes: the connection is now always closed (it used to leak), and the
    query parameter is passed as a real one-tuple.
    """
    conn = get_db()
    cursor = conn.cursor()
    sql = "select * from platform_login_phone where phone = %s and session is not null"
    cursor.execute(sql, (phone,))
    phone_existed = cursor.fetchone()
    if phone_existed:
        print('该手机号已经在数据库中...')
    else:
        try:
            sql = "insert into platform_login_phone (phone, session, time) values (%s, %s, %s)"
            cursor.execute(sql, (phone, session, datetime.datetime.now()))
            conn.commit()
        except Exception as e:
            print(e)
            conn.rollback()
    conn.close()
def update_session_time(phone, use_time):
    """Set login_phone.time for ``phone`` to ``use_time``.

    Fix: the connection is now always closed (it used to leak).
    """
    conn = get_db()
    cursor = conn.cursor()
    try:
        sql = "update login_phone set time=%s where phone=%s"
        cursor.execute(sql, (use_time, phone))
        conn.commit()
    except Exception as e:
        print(e)
        conn.rollback()
    conn.close()
def delete_session(phone):
    """Clear the stored session for ``phone`` (sets it to NULL).

    Fixes: the connection is now always closed (it used to leak), and the
    query parameter is passed as a real one-tuple.
    """
    conn = get_db()
    cursor = conn.cursor()
    try:
        sql = "update login_phone set session=NULL where phone=%s"
        cursor.execute(sql, (phone,))
        conn.commit()
    except Exception as e:
        print(e)
        conn.rollback()
    conn.close()
def get_session(phone):
    """Return (session, time) for ``phone`` from login_phone.

    :return: (session, time) tuple, or (None, None) when not found.
    """
    conn = get_db()
    cursor = conn.cursor()
    cursor.execute(
        "select session, time from login_phone where phone=%s", (phone,))
    row = cursor.fetchone()
    conn.close()
    if row is None:
        return None, None
    return row[0], row[1]
def get_session_platform(phone):
    """Return (session, time) for ``phone`` from platform_login_phone.

    :return: (session, time) tuple, or (None, None) when not found.
    """
    conn = get_db()
    cursor = conn.cursor()
    cursor.execute(
        "select session, time from platform_login_phone where phone=%s",
        (phone,))
    row = cursor.fetchone()
    conn.close()
    if row is None:
        return None, None
    return row[0], row[1]
def get_login_phones():
    """Return all phones that still have a stored session.

    Fix: the connection is now closed instead of leaked.
    :return: tuple of 1-tuples as produced by fetchall().
    """
    conn = get_db()
    cursor = conn.cursor()
    sql = "select phone from login_phone where session is not null"
    cursor.execute(sql)
    result = cursor.fetchall()
    conn.close()
    return result
# 自动注册所需要的,避免重复从接码平台获取同一个手机号
def auto_register_get_phone(phone):
    """True when ``phone`` was already fetched from the SMS platform
    (i.e. it exists in check_phones); used to avoid re-fetching."""
    conn = get_db()
    cursor = conn.cursor()
    cursor.execute("select phone from check_phones where phone=%s", (phone,))
    found = cursor.fetchone() is not None
    conn.close()
    return found
def auto_register_insert_phone(phone, category):
    """Record ``phone`` in check_phones so it is never fetched again."""
    conn = get_db()
    cursor = conn.cursor()
    try:
        cursor.execute(
            "insert ignore into check_phones (phone, category, time) values (%s, %s, %s)",
            (phone, category, datetime.datetime.now()))
        conn.commit()
        print('插入phone信息成功')
    except Exception as e:
        print(e)
        conn.rollback()
    conn.close()
if __name__ == '__main__':
    # Ad-hoc smoke test for the check_user helpers. (A large block of
    # commented-out scratch code that previously lived here was removed.)
    count = get_the_count_of_check_user()
    print(count)
    import random
    index = random.randint(0, count - 1)
    # NOTE(review): a random index is computed but a hard-coded 0 is passed
    # below — looks like a debugging leftover; confirm the intent before
    # switching to `index`.
    phone = get_the_phone_from_check_user_by_index(0)
    print(phone)
    info = get_info_from_check_user_by_phone("85262008167")
    print(info)
|
# coding: utf-8
# Auto-exported Jupyter notebook on boolean/logical values. The bare
# expressions below were cell outputs in the notebook; as a script they
# evaluate and are discarded (no output except the print() cells).
# In[1]:
#Boolean / logical values:
# In[2]:
#True
# In[3]:
#False
# In[4]:
4<5
# In[5]:
10>100
# In[6]:
4 == 5
# In[7]:
# Comparison and logical operators (Python vs R):
# == equals
# != or <> (<> is Python 2 only)
# <
# >
# <=
# >=
# and (in R it's &)
# or (in R it's |)
# not
# ---
# In[8]:
result=4<5
# In[9]:
result
# In[10]:
type(result)
# In[11]:
result2 = not(5>1)
# In[12]:
result2
# In[13]:
result or result2 #checks, if at least one element is true
# In[14]:
result and result2 #here all elements have to be true to make this true
# In[15]:
result3=4!=4
result3
# In[16]:
result4=4!=5
result4
# In[17]:
print(result)
print(result2)
print(result3)
print(result4)
# In[18]:
result and result4
# In[19]:
result2 and result3
# In[20]:
result or result2
# In[21]:
result3 or result4
# In[22]:
(result or result2) and (result3 or result4)
# In[23]:
# Same operands without parentheses: `and` binds tighter than `or`.
result or result2 and result3 or result4
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db import models
# from .utilidades import ESTADO,TIPO_CANCHA,TIPOUSR,ESTADO_CUOTA,TRIBUTO_CUOTA,TIPO_LOGIN
from django.contrib.auth.models import User
from datetime import datetime,date
from dateutil.relativedelta import *
from django.conf import settings
import os
from .utilidades import COMPROB_FISCAL,CATEG_FISCAL,PROVINCIAS,TIPO_CTA,TIPO_LOGOTIPO,get_image_name
from django.core.files.storage import default_storage
#Tabla de la Base de Configuracion
class gral_afip_categorias(models.Model):
    """AFIP fiscal category: an invoice letter plus an amount threshold."""
    id = models.AutoField(primary_key=True,db_index=True)
    # Invoice letter (choices come from COMPROB_FISCAL).
    letra = models.CharField(u'Letra',choices=COMPROB_FISCAL,max_length=1,blank=True, null=True)
    importe = models.DecimalField(max_digits=15, decimal_places=2, blank=True, null=True,default=0)
    class Meta:
        db_table = 'gral_afip_categorias'
    def __unicode__(self):
        return u'%s' % self.letra
class gral_empresa(models.Model):
    """Company master record: fiscal identity, contact data, UI toggles,
    electronic-invoicing (AFIP) credentials and SMTP settings."""
    id = models.AutoField(primary_key=True,db_index=True)
    nombre = models.CharField('Nombre',max_length=100)
    categ_fiscal = models.IntegerField(u'Categoría Fiscal',choices=CATEG_FISCAL, blank=True, null=True)
    cuit = models.CharField('CUIT',max_length=50)
    iibb = models.CharField(u'Nº IIBB',max_length=50,blank=True, null=True)
    fecha_inicio_activ = models.DateTimeField('Fecha Inicio Actividades',null=True)
    domicilio = models.CharField('Domicilio',max_length=200,blank=True, null=True)
    provincia = models.IntegerField('Provincia',choices=PROVINCIAS, blank=True, null=True,default=12)
    localidad = models.CharField('Localidad',max_length=100,blank=True, null=True)
    cod_postal = models.CharField('CP',max_length=50,blank=True, null=True)
    email = models.EmailField('Email')
    telefono = models.CharField(u'Teléfono',max_length=50,blank=True, null=True)
    celular = models.CharField('Celular',max_length=50,blank=True, null=True)
    baja = models.BooleanField(default=False)
    fecha_creacion = models.DateField(auto_now_add = True)
    nombre_fantasia = models.CharField(u'Nombre Fantasía',max_length=200)
    dias_vencimiento_cpbs = models.IntegerField(u'Días Vencimiento CPBS', blank=True, null=True,default=0)
    pprincipal_aviso_tareas = models.BooleanField(u'Tareas Pendientes al inicio',default=False)
    pprincipal_panel_cpbs = models.BooleanField(u'Panel Últimos CPBs',default=False)
    pprincipal_estadisticas = models.BooleanField(u'Panel Estadísticas',default=False)
    fp_facturas = models.BooleanField(u'Mostrar FP en Facturas',default=True)
    firma_facturas = models.BooleanField(u'Mostrar Firma en Facturas',default=True)
    barra_busq_meses_atras = models.IntegerField(blank=True, null=True,default=2)
    pto_vta_defecto = models.ForeignKey('comprobantes.cpb_pto_vta',verbose_name=u'Pto. Venta x Defecto',db_column='pto_vta_defecto',blank=True, null=True,on_delete=models.SET_NULL)
    fe_crt = models.CharField('Nombre Archivo CRT',max_length=50,blank=True, null=True)
    fe_key = models.CharField('Nombre Archivo Key',max_length=50,blank=True, null=True)
    homologacion = models.BooleanField(u'Modo HOMOLOGACIÓN',default=True)
    mail_cuerpo = models.CharField(u'Cuerpo del Email (envío de Comprobantes)',max_length=500,blank=True, null=True)
    mail_servidor = models.CharField(u'Servidor SMTP',max_length=100, blank=True)
    mail_puerto = models.IntegerField(u'Puerto',blank=True, null=True,default=587)
    mail_usuario =models.CharField('Usuario',max_length=100, blank=True)
    mail_password =models.CharField('Password',max_length=100, blank=True)
    afip_categoria = models.ForeignKey('general.gral_afip_categorias',verbose_name=u'Categoría AFIP (si corresponde)',db_column='afip_categoria',blank=True, null=True,on_delete=models.SET_NULL)
    #ruta_logo = models.ImageField(upload_to=get_image_name,db_column='ruta_logo', max_length=100,null=True, blank=True) # Field name made lowercase.
    ruta_logo = models.CharField(db_column='ruta_logo', max_length=100,null=True, blank=True) # Field name made lowercase.
    tipo_logo_factura = models.IntegerField(u'Tipo Logotipo',choices=TIPO_LOGOTIPO, blank=True, null=True)
    usa_impuestos = models.BooleanField(u'Utiliza Impuestos Adicionales',default=False)
    describe_impuestos = models.BooleanField(u'Impuestos en Detalle Factura',default=False)
    nombre_impuesto1 = models.CharField('Nombre Impuesto1',max_length=50,blank=True, null=True)
    nombre_impuesto2 = models.CharField('Nombre Impuesto2',max_length=50,blank=True, null=True)
    codbar_precio = models.BooleanField(u'Precio en Etiquetas CB',default=False)
    codbar_detalle = models.BooleanField(u'Detalle en Etiquetas CB',default=False)
    cbu = models.CharField('CBU',max_length=100, blank=True)
    #cbu_alias = models.CharField('Alias CBU',max_length=20, blank=True)
    class Meta:
        db_table = 'gral_empresa'
    def __unicode__(self):
        return u'%s' % (self.nombre_fantasia)
    def get_dias_venc(self):
        """Return the receipt due-days setting, defaulting to 0."""
        if self.dias_vencimiento_cpbs:
            return self.dias_vencimiento_cpbs
        else:
            return 0
    def get_datos_mail(self):
        """Return SMTP settings as a dict, falling back to Django settings
        (and a generic body / derived sender) for any unset field."""
        d= {}
        d['mail_cuerpo']= self.mail_cuerpo or u'Estimado/as les envío por este medio el comprobante solicitado.'
        d['mail_servidor']= self.mail_servidor or settings.EMAIL_HOST
        # Fix: int(self.mail_puerto) raised TypeError when mail_puerto was
        # NULL; apply the fallback before converting to int.
        d['mail_puerto']= int(self.mail_puerto or settings.EMAIL_PORT)
        d['mail_usuario']= self.mail_usuario or settings.EMAIL_HOST_USER
        d['mail_password']= self.mail_password or settings.EMAIL_HOST_PASSWORD
        d['mail_origen']= self.email or d['mail_usuario']+'@ironweb.com.ar'
        return d
class gral_tareas(models.Model):
    """A task/reminder assigned between users of a company."""
    id = models.AutoField(primary_key=True,db_index=True)
    estado = models.CharField(u'Estado',max_length=50,blank=True, null=True)
    title = models.CharField(u'Título',max_length=200,blank=True, null=True)
    detalle = models.TextField('Detalle',blank=True, null=True)
    respuesta = models.CharField(u'Respuesta',max_length=500,blank=True, null=True)
    color = models.CharField(u'Color',max_length=200,blank=True, null=True)
    # Creator vs. assignee; both survive user deletion via SET_NULL.
    usuario_creador = models.ForeignKey('usuarios.usu_usuario',db_column='usuario_creador',blank=True, null=True,related_name='usuario_creador',on_delete=models.SET_NULL)
    usuario_asignado = models.ForeignKey('usuarios.usu_usuario',db_column='usuario_asignado',blank=True, null=True,related_name='usuario_asignado',on_delete=models.SET_NULL)
    fecha = models.DateTimeField(default=datetime.now)
    fecha_creacion = models.DateField(auto_now=True)
    empresa = models.ForeignKey('general.gral_empresa',db_column='empresa',blank=True, null=True,on_delete=models.SET_NULL)
    class Meta:
        db_table = 'gral_tareas'
    def __unicode__(self):
        return u'%s %s : %s' % (self.fecha,self.usuario_asignado,self.title)
class gral_plan_cuentas(models.Model):
    """Chart-of-accounts entry; self-referencing `padre` builds the tree."""
    id = models.AutoField(primary_key=True,db_index=True)
    codigo = models.CharField(max_length=100)
    nombre = models.CharField(max_length=100)
    tipo = models.IntegerField(choices=TIPO_CTA, blank=True, null=True)
    # Parent account for the hierarchy; NULL for top-level accounts.
    padre = models.ForeignKey('general.gral_plan_cuentas', db_column='padre',related_name="plan_ctas_padre",blank=True, null=True,on_delete=models.SET_NULL)
    baja = models.BooleanField(default=False)
    empresa = models.ForeignKey('general.gral_empresa',db_column='empresa',blank=True, null=True,on_delete=models.SET_NULL)
    class Meta:
        db_table = 'gral_plan_cuentas'
        ordering = ['codigo','nombre']
    def __unicode__(self):
        return u'%s %s' % (self.codigo,self.nombre)
|
from dataclasses import dataclass
from typing import TYPE_CHECKING, Optional
import re
from pyquery import PyQuery
from utils import url_is_broken
if TYPE_CHECKING:
from entities.page import Page
@dataclass
class Link:
    """A hyperlink found on a crawled page, stored as its raw anchor HTML."""
    # Raw outer HTML of the <a> element.
    html: str
    # Owning page; provides base_url used to resolve relative hrefs.
    page: "Page"
    @property
    def href(self) -> Optional[str]:
        """The href attribute as parsed by PyQuery (None when absent)."""
        return PyQuery(self.html).attr('href')
    @property
    def text(self) -> str:
        """The visible link text."""
        return PyQuery(self.html).text()
    @property
    def url(self) -> Optional[str]:
        """Absolute URL for this link, or None when it cannot be resolved.

        Absolute http(s) hrefs pass through unchanged; root-relative hrefs
        are joined to the page base URL; anything else (fragments, mailto:,
        protocol-relative or path-relative hrefs) yields None.
        """
        href = self.href
        if href is None:
            return None
        if re.match(r'^https?://', href):
            return href
        if href.startswith('/'):
            return f'{self.page.base_url}{href}'
        return None
    def is_internal(self) -> bool:
        """True when the resolved URL lives under the owning page's base URL."""
        url = self.url
        if url is None:
            return False
        return url.startswith(self.page.base_url)
    async def is_broken(self) -> bool:
        """Probe the resolved URL; unresolvable links count as not broken."""
        url = self.url
        if url is None:
            return False
        is_broken = await url_is_broken(url)
        return is_broken
    def __str__(self):
        return self.html
|
# -*- coding: utf-8 -*-
"""Data Access Objects for diseasescope REST server"""
import os
import logging
import shutil
import json
import glob
logger = logging.getLogger(__name__)
# File names used inside each task directory
TASK_JSON = 'task.json'
TMP_RESULT = 'result.tmp'
RESULT = 'result.json'
# Key under which a task's status is reported
STATUS_RESULT_KEY = 'status'
# Task lifecycle states; also the names of the state directories on disk
NOTFOUND_STATUS = 'notfound'
UNKNOWN_STATUS = 'unknown'
SUBMITTED_STATUS = 'submitted'
PROCESSING_STATUS = 'processing'
DONE_STATUS = 'done'
ERROR_STATUS = 'error'
STATUS_LIST = [UNKNOWN_STATUS, SUBMITTED_STATUS,
               PROCESSING_STATUS, ERROR_STATUS,
               DONE_STATUS]
# directory where token files named after tasks to delete
# are stored
DELETE_REQUESTS = 'delete_requests'
# key in result dictionary denoting the
# result data
RESULT_KEY = 'result'
NDEXURL_KEY = 'ndexurl'
HIVIEWURL_KEY = 'hiviewurl'
# key in result dictionary denoting input parameters
PARAMETERS_KEY = 'parameters'
NDEXSERVER_PARAM = 'ndexserver'
NDEXUSER_PARAM = 'ndexuser'
NDEXPASS_PARAM = 'ndexpass'
NDEXNAME_PARAM = 'ndexname'
HIVIEWURL_PARAM = 'hiviewurl'
DOID_PARAM = 'doid'
TISSUE_PARAM = 'tissue'
class FileBasedTask(object):
"""Represents a task
"""
BASEDIR = 'basedir'
STATE = 'state'
IPADDR = 'ipaddr'
UUID = 'uuid'
TASK_FILES = [TASK_JSON]
    def __init__(self, taskdir, taskdict):
        """
        Constructor

        :param taskdir: filesystem directory holding this task's files
        :param taskdict: dict of task parameters (contents of task.json)
        """
        self._taskdir = taskdir
        self._taskdict = taskdict
def delete_task_files(self):
"""
Deletes all files and directories pertaining to task
on filesystem
:return: None upon success or str with error message
"""
if self._taskdir is None:
return 'Task directory is None'
if not os.path.isdir(self._taskdir):
return ('Task directory ' + self._taskdir +
' is not a directory')
# this is a paranoid removal since we only are tossing
# the directory in question and files listed in TASK_FILES
try:
for entry in os.listdir(self._taskdir):
if entry not in FileBasedTask.TASK_FILES:
logger.error(entry + ' not in files created by task')
continue
fp = os.path.join(self._taskdir, entry)
if os.path.isfile(fp):
os.unlink(fp)
os.rmdir(self._taskdir)
return None
except Exception as e:
logger.exception('Caught exception removing ' + self._taskdir)
return ('Caught exception ' + str(e) + 'trying to remove ' +
self._taskdir)
def save_task(self):
"""
Updates task in datastore. For filesystem based
task this means rewriting the task.json file
:return: None for success otherwise string containing error message
"""
if self._taskdir is None:
return 'Task dir is None'
if self._taskdict is None:
return 'Task dict is None'
if not os.path.isdir(self._taskdir):
return str(self._taskdir) + ' is not a directory'
tjsonfile = os.path.join(self._taskdir, TASK_JSON)
logger.debug('Writing task data to: ' + tjsonfile)
with open(tjsonfile, 'w') as f:
json.dump(self._taskdict, f)
return None
    def move_task(self, new_state,
                  error_message=None):
        """
        Changes state of task to new_state by moving the task directory
        under the matching <basedir>/<state>/<ip>/<uuid> location.

        :param new_state: new state (one of the STATUS_LIST values)
        :param error_message: message stored in the task dict when
            new_state is ERROR_STATUS
        :return: None on success (or same-state no-op), str with error
            message otherwise
        """
        taskattrib = self._get_uuid_ip_state_basedir_from_path()
        if taskattrib is None or taskattrib[FileBasedTask.BASEDIR] is None:
            return 'Unable to extract state basedir from task path'
        if taskattrib[FileBasedTask.STATE] == new_state:
            logger.debug('Attempt to move task to same state: ' +
                         self._taskdir)
            return None
        # if new state is error still put the task into
        # done directory, but update error message in
        # task json
        if new_state == ERROR_STATUS:
            new_state = DONE_STATUS
            if error_message is None:
                emsg = 'Unknown error'
            else:
                emsg = error_message
            logger.info('Task set to error state with message: ' +
                        emsg)
            self._taskdict['message'] = emsg
            self.save_task()
        logger.debug('Changing task: ' + str(taskattrib[FileBasedTask.UUID]) +
                     ' to state ' + new_state)
        ptaskdir = os.path.join(taskattrib[FileBasedTask.BASEDIR], new_state,
                                taskattrib[FileBasedTask.IPADDR],
                                taskattrib[FileBasedTask.UUID])
        shutil.move(self._taskdir, ptaskdir)
        self._taskdir = ptaskdir
        return None
def _get_uuid_ip_state_basedir_from_path(self):
"""
Parses taskdir path into main parts and returns
result as dict
:return: {'basedir': basedir,
'state': state
'ipaddr': ip address,
'uuid': task uuid}
"""
if self._taskdir is None:
logger.error('Task dir not set')
return {FileBasedTask.BASEDIR: None,
FileBasedTask.STATE: None,
FileBasedTask.IPADDR: None,
FileBasedTask.UUID: None}
taskuuid = os.path.basename(self._taskdir)
ipdir = os.path.dirname(self._taskdir)
ipaddr = os.path.basename(ipdir)
if ipaddr == '':
ipaddr = None
statedir = os.path.dirname(ipdir)
state = os.path.basename(statedir)
if state == '':
state = None
basedir = os.path.dirname(statedir)
return {FileBasedTask.BASEDIR: basedir,
FileBasedTask.STATE: state,
FileBasedTask.IPADDR: ipaddr,
FileBasedTask.UUID: taskuuid}
def get_ipaddress(self):
"""
gets ip address
:return:
"""
res = self._get_uuid_ip_state_basedir_from_path()[FileBasedTask.IPADDR]
return res
def get_state(self):
"""
Gets current state of task based on taskdir
:return:
"""
return self._get_uuid_ip_state_basedir_from_path()[FileBasedTask.STATE]
def get_task_uuid(self):
"""
Parses taskdir path to get uuid
:return: string containing uuid or None if not found
"""
return self._get_uuid_ip_state_basedir_from_path()[FileBasedTask.UUID]
def get_task_summary_as_str(self):
"""
Prints quick summary of task
:return:
"""
res = self._get_uuid_ip_state_basedir_from_path()
return str(res)
def set_taskdir(self, taskdir):
"""
Sets task directory
:param taskdir:
:return:
"""
self._taskdir = taskdir
def get_taskdir(self):
"""
Gets task directory
:return:
"""
return self._taskdir
def set_taskdict(self, taskdict):
"""
Sets task dictionary
:param taskdict:
:return:
"""
self._taskdict = taskdict
def get_taskdict(self):
"""
Gets task dictionary
:return:
"""
return self._taskdict
def get_diseaseid(self):
"""
Gets alpha parameter
:return: alpha parameter or None
"""
if self._taskdict is None:
return None
if DOID_PARAM not in self._taskdict:
return None
res = self._taskdict[DOID_PARAM]
return res
def get_ndexname(self):
"""
Gets ndex name parameter
:return: ndex name parameter or None
"""
if self._taskdict is None:
return None
if NDEXNAME_PARAM not in self._taskdict:
return None
return self._taskdict[NDEXNAME_PARAM]
def get_ndexserver(self):
"""
Gets ndex server parameter
:return: ndex server or None
"""
if self._taskdict is None:
return None
if NDEXSERVER_PARAM not in self._taskdict:
return None
return self._taskdict[NDEXSERVER_PARAM]
def get_ndexuser(self):
"""
Gets ndex user parameter
:return: ndex user or None
"""
if self._taskdict is None:
return None
if NDEXUSER_PARAM not in self._taskdict:
return None
return self._taskdict[NDEXUSER_PARAM]
def get_ndexpass(self):
"""
Gets ndex password
:return: ndex password or None
"""
if self._taskdict is None:
return None
if NDEXPASS_PARAM not in self._taskdict:
return None
return self._taskdict[NDEXPASS_PARAM]
def get_hiviewurl(self):
"""
Gets ndex password
:return: ndex password or None
"""
if self._taskdict is None:
return None
if HIVIEWURL_PARAM not in self._taskdict:
return None
return self._taskdict[HIVIEWURL_PARAM]
class FileBasedSubmittedTaskFactory(object):
    """
    Factory that scans the file system for newly submitted tasks.
    """
    def __init__(self, taskdir):
        """
        Constructor

        :param taskdir: base directory containing the task state directories
        """
        self._taskdir = taskdir
        self._submitdir = None
        if self._taskdir is not None:
            self._submitdir = os.path.join(self._taskdir,
                                           SUBMITTED_STATUS)
        self._problemlist = []

    def get_next_task(self):
        """
        Returns the first task found under the submit directory whose json
        file can be parsed. Tasks with unreadable json are remembered in the
        problem list and skipped on later scans.

        :return: FileBasedTask or None when no task is available
        """
        if self._submitdir is None:
            logger.error('Submit directory is None')
            return None
        if not os.path.isdir(self._submitdir):
            logger.error(self._submitdir +
                         ' does not exist or is not a directory')
            return None
        logger.debug('Examining ' + self._submitdir + ' for new tasks')
        for ip_entry in os.listdir(self._submitdir):
            ip_path = os.path.join(self._submitdir, ip_entry)
            if not os.path.isdir(ip_path):
                continue
            for task_entry in os.listdir(ip_path):
                task_path = os.path.join(ip_path, task_entry)
                if not os.path.isdir(task_path):
                    continue
                tjson = os.path.join(task_path, TASK_JSON)
                if not os.path.isfile(tjson):
                    continue
                try:
                    with open(tjson, 'r') as f:
                        jsondata = json.load(f)
                        return FileBasedTask(task_path, jsondata)
                except Exception as e:
                    if task_path not in self._problemlist:
                        logger.info('Skipping task: ' + task_path +
                                    ' due to error reading json' +
                                    ' file: ' + str(e))
                        self._problemlist.append(task_path)
        return None

    def get_size_of_problem_list(self):
        """
        Gets the number of tasks skipped due to unreadable json.

        :return: int count of problem tasks
        """
        return len(self._problemlist)

    def get_problem_list(self):
        """
        Gets the list of task paths skipped due to unreadable json.

        :return: list of directory paths
        """
        return self._problemlist
class DeletedFileBasedTaskFactory(object):
    """
    Factory that scans the filesystem for tasks flagged for deletion.
    """
    def __init__(self, taskdir):
        """
        Constructor

        :param taskdir: base directory holding the task state sub directories
        """
        self._taskdir = taskdir
        self._delete_req_dir = None
        self._searchdirs = []
        if self._taskdir is None:
            logger.error('Taskdir is None')
        else:
            self._delete_req_dir = os.path.join(self._taskdir,
                                                DELETE_REQUESTS)
            # tasks can live under any of these state directories
            for state in [PROCESSING_STATUS, SUBMITTED_STATUS, DONE_STATUS]:
                self._searchdirs.append(os.path.join(self._taskdir, state))

    def get_next_task(self):
        """
        Returns the next task with an outstanding delete request; the
        request file is consumed whether or not the task is found.

        :return: FileBasedTask or None when no request is pending
        """
        if self._delete_req_dir is None:
            logger.error('Delete request dir is None')
            return None
        if not os.path.isdir(self._delete_req_dir):
            logger.error(self._delete_req_dir + ' is not a directory')
            return None
        logger.debug('Examining ' + self._delete_req_dir +
                     ' for delete task requests')
        for req_name in os.listdir(self._delete_req_dir):
            req_file = os.path.join(self._delete_req_dir, req_name)
            if not os.path.isfile(req_file):
                continue
            found = self._get_task_with_id(req_name)
            logger.info('Removing delete request file: ' + req_file)
            os.unlink(req_file)
            if found is not None:
                return found
            logger.info('Task ' + req_name + ' not found')
        return None

    def _get_task_with_id(self, taskid):
        """
        Uses glob to locate the task directory with the given id under any
        of the state directories.

        :return: FileBasedTask object or None if not found
        """
        for state_dir in self._searchdirs:
            pattern = os.path.join(state_dir, '*', taskid)
            for match in glob.glob(pattern):
                if not os.path.isdir(match):
                    logger.error('Found match (' + match +
                                 '), but its not a directory')
                    continue
                jsonfile = os.path.join(match, TASK_JSON)
                if not os.path.isfile(jsonfile):
                    logger.error('No json for task ' + match +
                                 ' going to skip json')
                    return FileBasedTask(match, {})
                try:
                    with open(jsonfile, 'r') as f:
                        jsondata = json.load(f)
                        return FileBasedTask(match, jsondata)
                except Exception as e:
                    logger.exception('Unable to parse json for task ' +
                                     match + ' going to skip json: ' +
                                     str(e))
                    return FileBasedTask(match, {})
        return None
|
from flask import Flask, request
from flask_restful import Resource, Api
import base64
app = Flask(__name__)
# NOTE(review): secret key is hard-coded in source control - load it from an
# environment variable or a config file before deploying
app.secret_key = 'Abhi1234'
api = Api(app)
class Picture(Resource):
    """
    REST resource that stores a picture posted as base64 text in a JSON body.
    """
    # @jwt_required()
    def post(self):
        """
        Handles POST of a JSON payload with key "pic_str" (base64 image data).

        Writes the raw base64 text to test.txt and the decoded bytes to
        imageToSave.jpg, then reports success.

        :return: tuple of message and HTTP 201 status
        """
        req_data = request.get_json()
        # NOTE(review): absolute Windows paths are machine specific - consider
        # making the storage directory configurable
        # with-blocks ensure the file handles are closed even if a write fails
        with open('C:/Users/prasanna/PycharmProjects/AutoSavePic_API/picStore/test.txt', 'w') as f:
            f.write(req_data["pic_str"])
        imgdata = base64.b64decode(req_data["pic_str"])
        with open("C:/Users/prasanna/PycharmProjects/AutoSavePic_API/picStore/imageToSave.jpg", "wb") as fh:
            fh.write(imgdata)
        return ("Image saved"), 201
# expose the Picture resource at /picPost
api.add_resource(Picture,"/picPost")
# NOTE(review): debug=True and the hard-coded LAN ip/port are development
# settings - disable debug and externalize host/port before production use
app.run(host= "192.168.1.107", port = 2935, debug = True)
|
import sys
import c

# sample input used for both the real and the faked call below
VAL = 20
print("The real value of c.func is: {}".format(c.func(VAL)))
# ideally, you would have never imported this module
# Removing "c" from sys.modules forces the later ``import c`` to re-execute
# c's module body; at that point it presumably re-imports "a" and will pick
# up whatever object sys.modules["a"] holds - which we swap below.
sys.modules.pop("c")
# create reference to real import so we don't lose it
a_real_import = __import__("a")
a_fake_import = __import__("b")
# fake the import: module "b" now masquerades as "a"
sys.modules["a"] = a_fake_import
import c
# set it back to the real value so later imports of "a" are unaffected
sys.modules["a"] = a_real_import
print("The fake value of c.func is: {}".format(c.func(VAL)))
"""model for text2sql
03.11.2020 - @yashbonde"""
import numpy as np
from tabulate import tabulate
from types import SimpleNamespace
import torch
import torch.nn as nn
from torch.nn import functional as F
from transformers.modeling_utils import find_pruneable_heads_and_indices, prune_conv1d_layer
from transformers.activations import ACT2FN
# the code below is only a slightly modified version of the huggingface implementation.
class Conv1D(nn.Module):
    """
    GPT-2 style "1D convolution": a linear projection whose weight is
    stored transposed, i.e. with shape (in_features, out_features).

    Weight is initialized from N(0, 0.02); bias starts at zero.
    """
    def __init__(self, nf, nx):
        """
        :param nf: number of output features
        :param nx: number of input features
        """
        super().__init__()
        self.nf = nf
        weight = torch.empty(nx, nf)
        nn.init.normal_(weight, std=0.02)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        """Applies x @ weight + bias over the last dimension of x."""
        out_shape = x.size()[:-1] + (self.nf,)
        flat = x.view(-1, x.size(-1))
        projected = torch.addmm(self.bias, flat, self.weight)
        return projected.view(*out_shape)
class Attention(nn.Module):
    """
    Multi-head attention (slightly modified from the huggingface GPT-2 code).

    When ``is_cross_attention`` is True, queries come from the decoder
    hidden states (via ``q_attn``) and keys/values from the encoder hidden
    states; otherwise one ``c_attn`` projection produces query, key and
    value from the same input.
    """
    def __init__(self, nx, n_ctx, config, scale=False, is_cross_attention=False):
        """
        :param nx: embedding dimension (n_embd)
        :param n_ctx: context length (accepted for API compatibility; not stored)
        :param config: object providing n_head, attn_pdrop and resid_pdrop
        :param scale: when True, divide attention scores by sqrt(head dim)
        :param is_cross_attention: selects the cross-attention projections
        """
        super().__init__()
        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert n_state % config.n_head == 0
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.is_cross_attention = is_cross_attention
        if self.is_cross_attention:
            self.c_attn = Conv1D(2 * n_state, nx)
            self.q_attn = Conv1D(n_state, nx)
        else:
            self.c_attn = Conv1D(3 * n_state, nx)
        self.c_proj = Conv1D(n_state, nx)
        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)
        self.pruned_heads = set()
    def prune_heads(self, heads):
        """Removes the given attention heads, shrinking c_attn/c_proj in place."""
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.n_head, self.split_size // self.n_head, self.pruned_heads
        )
        index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
        # Prune conv1d layers
        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
        # Update hyper params
        self.split_size = (self.split_size // self.n_head) * (self.n_head - len(heads))
        self.n_head = self.n_head - len(heads)
        self.pruned_heads = self.pruned_heads.union(heads)
    def _attn(self, q, k, v, attention_mask=None, head_mask=None, output_attentions=False):
        """
        Core dot-product attention; returns [context] plus the attention
        weights when output_attentions is True.

        NOTE(review): head_mask is accepted but never applied in this body -
        confirm whether head masking was dropped intentionally.
        """
        w = torch.matmul(q, k)
        if self.scale:
            w = w / (float(v.size(-1)) ** 0.5)
        # print("((((", w.size(), attention_mask.size())
        if attention_mask is not None:
            # reshape the attention mask to fit the weight matrix
            # print(attention_mask[0, 0, :w.size(2), :])
            # Apply the attention mask (additive, before the softmax)
            w = w + attention_mask[:, :, :w.size(2), :]
        w = nn.Softmax(dim=-1)(w)
        w = self.attn_dropout(w)
        # print(w.size(), v.size())
        outputs = [torch.matmul(w, v)]
        if output_attentions:
            outputs.append(w)
        return outputs
    def merge_heads(self, x):
        """Inverse of split_heads: (batch, head, seq, hf) -> (batch, seq, head*hf)."""
        x = x.permute(0, 2, 1, 3).contiguous()
        new_x_shape = x.size()[:-2] + (x.size(-2) * x.size(-1),)
        return x.view(*new_x_shape)  # in Tensorflow implem: fct merge_states
    def split_heads(self, x, k=False):
        """Reshapes (batch, seq, n_state) into per-head tensors; keys get transposed."""
        new_x_shape = x.size()[:-1] + (self.n_head, x.size(-1) // self.n_head)
        x = x.view(*new_x_shape)  # in Tensorflow implem: fct split_states
        if k:
            return x.permute(0, 2, 3, 1)  # (batch, head, head_features, seq_length)
        else:
            return x.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)
    def forward(
        self,
        hidden_states,
        layer_past=None,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        use_cache=False,
        output_attentions=False,
    ):
        """
        Runs (cross-)attention over hidden_states.

        :return: [attn_output, present] plus attention weights when
                 output_attentions is True; ``present`` caches stacked
                 (key, value) for incremental decoding when use_cache is True,
                 otherwise it is the placeholder tuple (None,)
        """
        if encoder_hidden_states is not None:
            assert hasattr(
                self, "q_attn"
            ), "If class is used as cross attention, the weights `q_attn` have to be defined. Please make sure to instantiate class with `Attention(..., is_cross_attention=True)`."
            query = self.q_attn(hidden_states)
            key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
            attention_mask = encoder_attention_mask
        else:
            query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
        query = self.split_heads(query)
        key = self.split_heads(key, k=True)
        value = self.split_heads(value)
        if layer_past is not None:
            past_key, past_value = layer_past[0].transpose(-2, -1), layer_past[1]  # transpose back cf below
            key = torch.cat((past_key, key), dim=-1)
            value = torch.cat((past_value, value), dim=-2)
        if use_cache is True:
            present = torch.stack((key.transpose(-2, -1), value))  # transpose to have same shapes for stacking
        else:
            present = (None,)
        # print(query.size(), key.size(), value.size())
        attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions)
        a = attn_outputs[0]
        a = self.merge_heads(a)
        a = self.c_proj(a)
        a = self.resid_dropout(a)
        outputs = [a, present] + attn_outputs[1:]
        return outputs  # a, present, (attentions)
class MLP(nn.Module):
    """
    Position-wise feed forward block: Conv1D -> activation -> Conv1D -> dropout.
    """
    def __init__(self, n_state, config):
        """
        :param n_state: inner (hidden) width, typically 4 * n_embd
        :param config: object providing n_embd, activation_function, resid_pdrop
        """
        super().__init__()
        embed_dim = config.n_embd
        self.c_fc = Conv1D(n_state, embed_dim)
        self.c_proj = Conv1D(embed_dim, n_state)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, x):
        """Projects up, applies the activation, projects back and drops out."""
        hidden = self.act(self.c_fc(x))
        projected = self.c_proj(hidden)
        return self.dropout(projected)
class Block(nn.Module):
    """
    Single transformer block used by both the encoders and the decoder.

    Because nn.Sequential cannot forward keyword arguments, the input is a
    tagged tuple: "encoder"/"self" inputs carry (tag, hidden_states,
    attention_mask) while "decoder" inputs additionally carry
    (encoder_hidden_states, encoder_attention_mask). The same tuple shape
    is returned so blocks can be chained inside nn.Sequential.
    """
    def __init__(self, config, n_ctx, add_cross_attention = False, scale=False):
        """
        :param config: object providing n_embd, n_inner, layer_norm_epsilon etc.
        :param n_ctx: context length forwarded to Attention
        :param add_cross_attention: when True, adds a cross-attention sub-layer
        :param scale: forwarded to Attention
        """
        super().__init__()
        hidden_size= config.n_embd
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = Attention(hidden_size, n_ctx, config, scale)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        if add_cross_attention:
            self.crossattention = Attention(hidden_size, n_ctx, config, scale, is_cross_attention=True)
            self.ln_cross_attn = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.mlp = MLP(inner_dim, config)
    def forward(self, x):
        """
        :param x: tagged tuple, see class docstring
        :return: tuple of the same shape with updated hidden states
        """
        # this was not taking key word arguments in Sequential so need to pass around a tuple
        # so now I understood why huggingface coded by passing around lists, stupid!
        type_ =x[0]
        # print("^^^^", type_, len(x))
        if type_ in ["encoder", "self"]:
            (hidden_states, attention_mask) = x[1:]
        else:
            (hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask) = x[1:]
        attn_outputs = self.attn(
            self.ln_1(hidden_states),
            attention_mask=attention_mask,
        )
        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
        outputs = attn_outputs[1:]
        hidden_states = attn_output + hidden_states  # residual connection
        if type_ == "decoder":
            # add one self-attention block for cross-attention
            assert hasattr(
                self, "crossattention"
            ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
            cross_attn_outputs = self.crossattention(
                self.ln_cross_attn(hidden_states),
                attention_mask=attention_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
            )
            attn_output = cross_attn_outputs[0]
            # residual connection
            hidden_states = hidden_states + attn_output
            outputs = outputs + cross_attn_outputs[2:]  # add cross attentions if we output attention weights
        feed_forward_hidden_states = self.mlp(self.ln_2(hidden_states))
        # residual connection
        hidden_states = hidden_states + feed_forward_hidden_states
        outputs = [hidden_states] + outputs
        if type_ in ["encoder", "self"]:
            out = (type_, hidden_states, attention_mask)
        else:
            out = (type_, hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask)
        return out
class Decoder(nn.Module):
    """
    Decoder stack: each logical layer is a triplet of Blocks - causal
    self-attention over the SQL tokens, cross-attention over the sentence
    encoding, then cross-attention over the db encoding.
    """
    def __init__(self, config):
        """
        :param config: object providing n_decoder_layers, maxlen, n_embd etc.
        """
        super(Decoder, self).__init__()
        blocks = []
        for _ in range(config.n_decoder_layers):
            # causal / sentence / db triplet per decoder layer
            blocks.append(Block(config, n_ctx = config.maxlen, add_cross_attention = False))
            blocks.append(Block(config, n_ctx = config.maxlen, add_cross_attention = True))
            blocks.append(Block(config, n_ctx = config.maxlen, add_cross_attention = True))
        self.blocks = nn.Sequential(*blocks)
        self.ln = nn.LayerNorm(config.n_embd, eps = config.layer_norm_epsilon)

    def forward(self, x):
        """
        :param x: tuple of (sql hidden states, sql mask, sentence hidden
            states, sentence mask, db hidden states, db mask)
        :return: tuple of the same shape with updated sql hidden states
        """
        # nn.Sequential cannot forward keyword arguments, so everything
        # travels as one tuple
        (hidden_states_sql, attention_mask_sql, hidden_states_sent,
         attention_mask_sent, hidden_states_db, attention_mask_db) = x
        hidden_states = hidden_states_sql
        for idx, block in enumerate(self.blocks):
            phase = idx % 3
            if phase == 0:  # causal self attention over the sql tokens
                outputs = block(("self", hidden_states, attention_mask_sql))
            elif phase == 1:  # cross attention over the sentence encoding
                outputs = block(("decoder", hidden_states, attention_mask_sql,
                                 hidden_states_sent, attention_mask_sent))
            else:  # cross attention over the db encoding
                outputs = block(("decoder", hidden_states, attention_mask_sql,
                                 hidden_states_db, attention_mask_db))
            hidden_states = outputs[1]
        hidden_states_sql = hidden_states
        return (hidden_states_sql, attention_mask_sql,
                hidden_states_sent, attention_mask_sent,
                hidden_states_db, attention_mask_db)
class Text2SQLModel(nn.Module):
    """
    Encoder-decoder transformer for text-to-SQL generation.

    Two independent encoders process the natural language sentence and the
    linearised DB schema; the decoder attends causally over the SQL tokens
    and cross-attends to both encoder outputs. Token embeddings are shared
    across the three streams; each stream has its own learned positional
    parameter (wte_sent / wte_db / wte_sql).
    """
    def __init__(self, config):
        """
        :param config: Text2SQLModelConfig-like object providing vocab_size,
            n_embd, maxlen and the encoder/decoder layer counts
        """
        super(Text2SQLModel, self).__init__()
        self.config = config
        self.embedding = nn.Embedding(config.vocab_size, config.n_embd)
        # self.wte_sent = nn.Embedding(config.maxlen, config.n_embd)
        # self.wte_db = nn.Embedding(config.maxlen, config.n_embd)
        # self.wte_sql = nn.Embedding(config.maxlen, config.n_embd)
        # using embedding and position IDs isn't really going well so will use Parameter
        self.wte_sent = nn.Parameter(torch.zeros(config.maxlen, config.n_embd))
        self.wte_db = nn.Parameter(torch.zeros(config.maxlen, config.n_embd))
        self.wte_sql = nn.Parameter(torch.zeros(config.maxlen, config.n_embd))
        self.sentence_encoder = nn.Sequential(*[
            Block(config, n_ctx=config.maxlen, add_cross_attention=False)
            for _ in range(config.n_sent_layers)
        ])
        self.db_encoder = nn.Sequential(*[
            Block(config, n_ctx=config.maxlen, add_cross_attention=False)
            for _ in range(config.n_db_layers)
        ])
        self.decoder = Decoder(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias = False)
        self.apply(self._init_weights)
        print("number of parameters:", sum(p.numel() for p in self.parameters()))
    def _init_weights(self, module):
        """Initializes Linear/Embedding weights to N(0, 0.02); LayerNorm to identity."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=0.02)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
    def configure_optimizers(self, train_config):
        """
        This long function is unfortunately doing something very simple and is being very defensive:
        We are separating out all parameters of the model into two buckets: those that will experience
        weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
        We are then returning the PyTorch optimizer object.
        """
        # # separate out all parameters to those that will and won't experience regularizing weight decay
        # decay = set()
        # no_decay = set()
        # whitelist_weight_modules = (torch.nn.Linear, )
        # blacklist_weight_modules = (torch.nn.LayerNorm, torch.nn.Embedding)
        # for mn, m in self.named_modules():
        #     for pn, p in m.named_parameters():
        #         # print(mn, "--", pn)
        #         fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
        #         print(fpn, type(m))
        #         if fpn.endswith('bias'):
        #             # all biases will not be decayed
        #             no_decay.add(fpn)
        #             print(fpn, "--", 1)
        #         elif fpn.endswith('weight') and isinstance(m, whitelist_weight_modules):
        #             # weights of whitelist modules will be weight decayed
        #             decay.add(fpn)
        #             print(fpn, "--", 2)
        #         elif fpn.endswith('weight') and isinstance(m, blacklist_weight_modules):
        #             # weights of blacklist modules will NOT be weight decayed
        #             no_decay.add(fpn)
        #             print(fpn, "--", 3)
        #     print()
        # # validate that we considered every parameter
        # param_dict = {pn: p for pn, p in self.named_parameters()}
        # inter_params = decay & no_decay
        # union_params = decay | no_decay
        # assert len(inter_params) == 0, "parameters %s made it into both decay/no_decay sets!" % (str(inter_params), )
        # assert len(param_dict.keys() - union_params) == 0, "parameters %s were not separated into either decay/no_decay set!" \
        #     % (str(param_dict.keys() - union_params), )
        # # create the pytorch optimizer object
        # optim_groups = [
        #     {"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": train_config.weight_decay},
        #     {"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
        # ]
        # NOTE: the decay/no-decay split above is disabled; plain Adam is used
        optimizer = torch.optim.Adam(self.parameters(), lr=train_config.lr, betas=train_config.betas)
        return optimizer
    def get_position_ids(self, input, past_length, device):
        """Builds a (1, seq_len) tensor of position ids starting at past_length."""
        input_shape = input.size()
        position_ids = torch.arange(past_length, input_shape[-1] + past_length).long().to(device)
        position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
        print("**", position_ids.device)
        return position_ids
    def encoder_fn(self, sent, db, sent_attn, db_attn, device):
        """
        Encodes the sentence and db token ids.

        :return: SimpleNamespace with sent/db hidden states and their masks,
            suitable for repeated calls to decoder_fn during sampling
        """
        B, T = sent.size()
        sent = self.embedding(sent) + self.wte_sent[:T,:]
        db = self.embedding(db) + self.wte_db[:T,:]
        sent_hidden_states = self.sentence_encoder(("encoder", sent, sent_attn),)[1]
        db_hidden_states = self.db_encoder(("encoder", db, db_attn),)[1]
        return SimpleNamespace(
            sent_hidden_states=sent_hidden_states,
            db_hidden_states=db_hidden_states,
            sent_attn=sent_attn,
            db_attn=db_attn
        )
    def decoder_fn(self, enc_out, sql_ids, sql_attn, device):
        """
        Decodes sql_ids against previously computed encoder outputs.

        :param enc_out: namespace returned by encoder_fn
        :return: logits over the vocabulary for every sql position
        """
        B, T = sql_ids.size()
        sql = self.embedding(sql_ids) + self.wte_sql[:T,:]
        sql_output = self.decoder((sql, sql_attn, enc_out.sent_hidden_states,
            enc_out.sent_attn, enc_out.db_hidden_states, enc_out.db_attn),)[0]
        sql_output = self.lm_head(sql_output)
        return sql_output
    def forward(self, sql_ids, sent, db, sql_attn, sent_attn, db_attn, labels=None, past_length=0, device="cpu"):
        """
        Full training-style pass: encode sentence and db, decode sql.

        :param labels: optional target ids; when given, mean cross-entropy
            loss is prepended to the returned list
        :return: [logits] or [loss, logits]
        """
        B, T = sql_ids.size()
        # make the embeddings
        sql = self.embedding(sql_ids) + self.wte_sql[:T,:]
        sent = self.embedding(sent) + self.wte_sent[:T,:]
        db = self.embedding(db) + self.wte_db[:T,:]
        # get hidden_states for sentence_encoder
        sent_hidden_states = self.sentence_encoder(("encoder", sent, sent_attn),)[1]
        db_hidden_states = self.db_encoder(("encoder", db, db_attn),)[1]
        sql_output = self.decoder((sql, sql_attn, sent_hidden_states, sent_attn, db_hidden_states, db_attn),)[0]
        sql_output = self.lm_head(sql_output)
        output = [sql_output]
        if labels is not None:
            labels = labels.contiguous()
            logits = sql_output.contiguous()
            # loss_fct = nn.CrossEntropyLoss(reduction="none")
            # loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
            loss_fct = nn.CrossEntropyLoss(reduction="mean")
            loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1))
            # # get the indexes where the labels are not [PAD]
            # non_pad_mask = sql_attn[:, :, 0, 1:].contiguous().view(-1) == 0
            # print(loss.size(), non_pad_mask.size())
            # non_pad_loss = loss[non_pad_mask]
            # print("non_pad_loss", non_pad_loss)
            # loss = non_pad_loss.mean()
            output = [loss] + output
        return output
class Text2SQLModelConfig():
    """
    Hyperparameter container for Text2SQLModel.

    Class attributes hold the defaults; every keyword argument passed to
    the constructor is set as an instance attribute (shadowing a default
    when the name collides) and tracked in ``attrs`` for display.
    """
    vocab_size = 5012
    n_embd = 256
    maxlen = 128
    n_decoder_layers = 2
    n_sent_layers = 3
    n_db_layers = 3
    n_head = 8
    n_inner = None
    activation_function = "gelu_new"
    resid_pdrop = 0.1
    embd_pdrop = 0.1
    attn_pdrop = 0.1
    layer_norm_epsilon = 0.00001
    initializer_range = 0.02

    def __init__(self, **kwargs):
        """Records the default attribute names, then applies overrides."""
        self.attrs = [
            "vocab_size",
            "n_embd",
            "maxlen",
            "n_decoder_layers",
            "n_sent_layers",
            "n_db_layers",
            "n_head",
            "n_inner",
            "activation_function",
            "resid_pdrop",
            "embd_pdrop",
            "attn_pdrop",
            "layer_norm_epsilon",
            "initializer_range",
        ]
        for name, value in kwargs.items():
            self.attrs.append(name)
            setattr(self, name, value)

    def __repr__(self):
        """Renders all tracked attributes as a psql-style table."""
        rows = [(name, f"{getattr(self, name)}") for name in sorted(set(self.attrs))]
        return tabulate(rows, ["argument", "value"], tablefmt="psql")
# ====== Sampling Utils ====== #
def top_k_logits(logits, k):
    """
    Masks all but the k largest logits in every row with -1e6.

    :param logits: (batch, vocab) tensor; left unmodified
    :param k: number of logits to keep per row
    :return: new tensor with sub-top-k entries set to -1e6
    """
    topk_vals, _ = torch.topk(logits, k)
    masked = logits.clone()
    # per-row threshold is the smallest of the k kept values
    threshold = topk_vals[:, [-1]]
    masked[masked < threshold] = -1e6
    return masked
@torch.no_grad()
def sample(model, sent, sent_attn, db, db_attn, t, sql_str = None, device="cpu", steps=50, temperature=50, top_k=None, do_sample=True):
    """
    Autoregressively generates a SQL token sequence from a Text2SQLModel.

    :param model: Text2SQLModel instance
    :param sent: sentence token id tensor
    :param sent_attn: sentence attention mask
    :param db: db schema token id tensor
    :param db_attn: db attention mask
    :param t: tokenizer providing encode / bos_id / decode_ids
    :param sql_str: optional sql prefix to condition on (else starts at BOS)
    :param device: torch device string
    :param steps: number of tokens to generate
    :param temperature: logit divisor before the softmax
    :param top_k: if set, restrict sampling to the k most likely tokens
    :param do_sample: draw from the distribution when True, greedy argmax
        otherwise. BUG FIX: the original tested ``if sample:`` which referenced
        this function object and was therefore always truthy, so the greedy
        branch was unreachable; the default True preserves the old behaviour.
    :return: decoded string of generated token ids
    """
    model.eval()
    sent = sent.view(-1, sent.size(0))
    db = db.view(-1, db.size(0))
    sent_attn = sent_attn.view(-1, *sent_attn.size())
    db_attn = db_attn.view(-1, *db_attn.size())
    # encoder outputs are fixed for the whole generation loop
    enc_out = model.encoder_fn(sent, db, sent_attn, db_attn, device=device)
    # convert string to sql_tokens
    if sql_str is not None:
        sql = torch.from_numpy(np.asarray([t.encode(sql_str)])).long()
    else:
        sql = torch.from_numpy(np.asarray([t.bos_id()])).view(-1, 1).long()
    sql = sql.to(device)
    # final sequence
    out = []
    for k in range(steps):
        # crop the context to the model's maximum length
        if k == 0:
            x = sql if sql.size(1) < model.config.maxlen else sql[:, -model.config.maxlen:]
        else:
            x = x if x.size(1) < model.config.maxlen else x[:, -model.config.maxlen:]
        # build an additive causal mask: 0 on allowed positions, -1e6 above
        # the diagonal
        sql_attn = np.zeros((x.size(1), x.size(1)))
        sql_attn[:x.size(1), :x.size(1)] = 1
        sql_attn = sql_attn - np.triu(sql_attn, k = 1)  # causal masking
        sql_attn = 1 - sql_attn
        sql_attn = sql_attn * -1e6
        sql_attn = torch.from_numpy(sql_attn.astype(np.float32)).to(device).view(1, 1, *sql_attn.shape)
        logits = model.decoder_fn(enc_out, x, sql_attn, device=device)
        # pluck the logits at the final step and scale by temperature
        logits = logits[:, -1, :] / temperature
        # optionally crop probabilities to only the top k options
        if top_k is not None:
            logits = top_k_logits(logits, top_k)
        # apply softmax to convert to probabilities
        probs = F.softmax(logits, dim=-1)
        # sample from the distribution or take the most likely
        if do_sample:
            ix = torch.multinomial(probs, num_samples=1)
        else:
            _, ix = torch.topk(probs, k=1, dim=-1)
        # append to the sequence and continue
        x = torch.cat((x, ix), dim=1)
        out.append(ix[0].tolist()[0])
    return t.decode_ids(out)
|
# count occurrences of each unique value in the fuel_unit column
fuel_data.groupby('fuel_unit')['fuel_unit'].count()
# replace missing fuel_unit entries with the constant 'mcf'
fuel_data[['fuel_unit']] = fuel_data[['fuel_unit']].fillna(value='mcf')
#check if missing values have been filled
fuel_data.isnull().sum()
# count rows per report year
fuel_data.groupby('report_year')['report_year'].count()
#group by the fuel type code year and print the first entries in all the groups formed
fuel_data.groupby('fuel_type_code_pudl').first()
|
from pysam import FastaFile
import re
from tqdm import tqdm
import string
import os
from collections import defaultdict
# absolute path of the directory containing this script (with trailing '/')
CURRENT_DIR = os.path.dirname(os.path.abspath(__file__)) + '/'
# sibling data directory holding the genome fasta and annotation files
DATA_DIR = os.path.join(CURRENT_DIR, '..', 'data/')
# mm10 mouse genome in fasta format (despite the FASTQ name, it is a .fa file)
MM10_FASTQ = DATA_DIR + 'mm10.fa'
# Ensembl GRCm38.91 gene annotation in GTF format
MOUSE_GTF = DATA_DIR + 'Mus_musculus.GRCm38.91.chr.gtf'
# abort early when the annotation has not been unzipped yet
if not os.path.exists(MOUSE_GTF):
    # print() works on both Python 2 and 3 (the old print statement did not)
    print('please unzip Mus_musculus.GRCm38.91.chr.gtf.gz first')
    # BUG FIX: the original called sys.exit(0) but ``sys`` was never imported,
    # which raised a NameError; SystemExit achieves the same exit without it
    raise SystemExit(0)
REVERSE_COMPLIMENT_TAB = string.maketrans("ACTG", "TGAC")
def fetch_seq(seq_fetch_obj, chrom, start, end, strand):
    """
    Fetches the upper-cased sequence for a genomic region.

    :param seq_fetch_obj: object exposing fetch(chrom, start, end)
    :param chrom: chromosome name
    :param start: 0-based start coordinate
    :param end: end coordinate (exclusive)
    :param strand: '+' or '-'; minus strand sequences are reverse complemented
    :return: sequence string
    """
    region = seq_fetch_obj.fetch(chrom, start, end).upper()
    if strand != '-':
        return region
    return region.translate(REVERSE_COMPLIMENT_TAB)[::-1]
def get_transcript_cds():
    """
    Parses the mouse GTF annotation into a per-transcript feature dict.

    Relies on GTF ordering: a 'transcript' line precedes the CDS /
    start_codon / stop_codon / exon lines that belong to it, because those
    features are filed under the most recently seen transcript_id.
    Genomic start coordinates are converted to 0-based.

    :return: dict mapping transcript_id -> {'CDS': [(start, end), ...],
             'start_codon': [start, end], 'stop_codon': [start, end],
             'strand': str, 'chrom': str, 'exon': [[start, end], ...]}
             restricted to transcripts that have both a start codon and a
             stop codon annotated
    """
    fp = open(MOUSE_GTF)
    dict_transcript_cds = {}
    for index, line in enumerate(fp):
        if line.startswith('#'):
            continue
        sp = line.strip().split('\t')
        if sp[2] == 'gene':
            continue
        if sp[2] == 'transcript':
            # pull the transcript_id out of the attributes column
            transcript_id = re.sub('.*transcript_id "|\".*', '', sp[8])
            dict_transcript_cds[transcript_id] = {'CDS':[], 'start_codon':[], 'stop_codon':[], 'strand':sp[6], 'chrom':sp[0], 'exon':[]}
        if sp[2] == 'CDS':
            dict_transcript_cds[transcript_id]['CDS'].append((int(sp[3]) - 1, int(sp[4])))
        if sp[2] == 'start_codon':
            dict_transcript_cds[transcript_id]['start_codon'] = [int(sp[3]) - 1, int(sp[4])]
        if sp[2] == 'stop_codon':
            dict_transcript_cds[transcript_id]['stop_codon'] = [int(sp[3]) - 1, int(sp[4])]
        if sp[2] == 'exon':
            dict_transcript_cds[transcript_id]['exon'].append([int(sp[3]) - 1, int(sp[4])])
    fp.close()
    dict_transcript_cds_has_start_stop_codon = {}
    for transcript_id in dict_transcript_cds:
        ## require the CDS has clear start codon and stop codon annotation
        if len(dict_transcript_cds[transcript_id]['start_codon']) > 1 and len(dict_transcript_cds[transcript_id]['stop_codon']) > 1:
            dict_transcript_cds_has_start_stop_codon[transcript_id] = dict_transcript_cds[transcript_id]
    return dict_transcript_cds_has_start_stop_codon
def check_stop_codon(seq):
    """
    Tests whether any in-frame codon of seq is a stop codon.

    The sequence is read in frame from position 0; a trailing partial
    codon is ignored.

    :param seq: nucleotide string
    :return: True when TAG, TAA or TGA appears in frame, else False
    """
    codons = re.findall('...', seq)
    return any(codon in ('TAG', 'TAA', 'TGA') for codon in codons)
def get_stop_codon_pos(seq):
    """
    Finds the earliest in-frame stop codon of seq.

    BUG FIX: the previous implementation tested TAG, then TAA, then TGA and
    returned the first occurrence of whichever codon type matched first, so
    it could report a stop codon downstream of the one that actually
    terminates translation. Scanning codon by codon returns the earliest
    in-frame stop codon regardless of type.

    :param seq: nucleotide string read in frame from position 0
    :return: (codon index, codon string) of the first stop codon,
             or None when no in-frame stop codon exists
    """
    codons = re.findall('...', seq)
    for pos, codon in enumerate(codons):
        if codon in ('TAG', 'TAA', 'TGA'):
            return pos, codon
    return None
def get_intron_PTC():
    """
    Annotates each retained U intron with whether it introduces a premature
    termination codon (PTC) into any of its host transcripts.

    Reads the per-cell clustering output, rebuilds each transcript's CDS
    with and without the intron sequence, and writes one True/False row per
    (intron, transcript) pair to Intron_PTC_results.txt.

    BUG FIX: in the '+' strand branch the fallback assignment was written
    as a comparison (``second_last_intron == ''``), which left the variable
    stale or undefined; it is now a real assignment matching the '-' branch.
    """
    ## Our annotation is based on whole transcript reading frame.
    ## We investigate the effect of intron per transcript.
    U_Intron_List = []
    # get U introns from clustering results in step 2
    for cell in ['ESC', 'NPC', 'Ctx']:
        fp = open(CURRENT_DIR + '../xmeans_cluster_U_intron_2/{}.cluster.polyA.txt'.format(cell))
        fp.readline()
        for line in fp:
            sp = line.strip().split('\t')
            U_Intron_List.append(sp[-1])
        fp.close()
    U_Intron_List = list(set(U_Intron_List))
    CHROM_INDEX_LIST = ['chr' + str(x) for x in xrange(1, 20)] + ['chrX']
    seq_fetch_obj = FastaFile(MM10_FASTQ)
    dict_transcript_cds = get_transcript_cds()
    intron_id2transcripts = {}
    fp = open(DATA_DIR + 'Intron_transcript.txt')
    for line in fp:
        sp = line.strip().split('\t')
        intron_id2transcripts[sp[0]] = sp[1:]
    fp.close()
    fw = open(CURRENT_DIR + 'Intron_PTC_results.txt', 'w')
    fw.write('SIRI_ID\tTranscript_ID\tPTC\n')
    for intron_id in tqdm(U_Intron_List, total = len(U_Intron_List)):
        transcript_list = intron_id2transcripts[intron_id]
        # intron ids look like chrom_start_end_strand
        sq = intron_id.split('_')
        chrom, start, end, strand = sq[0], int(sq[1]) -1, int(sq[2]), sq[3]
        if chrom not in CHROM_INDEX_LIST:
            continue
        for transcript in transcript_list:
            if transcript not in dict_transcript_cds:
                continue
            cds_exon_list = dict_transcript_cds[transcript]['CDS']
            exon_list = dict_transcript_cds[transcript]['exon']
            stop_codon_pos = dict_transcript_cds[transcript]['stop_codon']
            if strand == '+':
                last_intron = chrom + '_' + '{}_{}_{}'.format(exon_list[-2][1] + 1, exon_list[-1][0], strand)
                if len(exon_list) >= 3:
                    second_last_intron = chrom + '_' + '{}_{}_{}'.format(exon_list[-3][1] + 1, exon_list[-2][0], strand)
                else:
                    # BUG FIX: was ``second_last_intron == ''`` (a no-op
                    # comparison), leaving the variable unset/stale
                    second_last_intron = ''
                # skip introns entirely downstream of the stop codon or
                # upstream of the first CDS exon
                if stop_codon_pos[1] <= start:
                    continue
                if cds_exon_list[0][0] >= end:
                    continue
            else:
                last_intron = chrom + '_' + '{}_{}_{}'.format(exon_list[-1][1] + 1, exon_list[-2][0], strand)
                if len(exon_list) >= 3:
                    second_last_intron = chrom + '_' + '{}_{}_{}'.format(exon_list[-2][1] + 1, exon_list[-3][0], strand)
                else:
                    second_last_intron = ''
                if stop_codon_pos[0] >= end:
                    continue
                if cds_exon_list[0][1] <= start:
                    continue
            # rebuild the CDS without (cds_seq) and with (cds_intron_seq)
            # the retained intron sequence
            cds_seq = ''
            cds_intron_seq = ''
            if strand == '+':
                for cds in cds_exon_list:
                    seq = fetch_seq(seq_fetch_obj, chrom, cds[0], cds[1], strand)
                    cds_seq += seq
                    cds_intron_seq += seq
                    if cds[1] == start:
                        cds_intron_seq += fetch_seq(seq_fetch_obj, chrom, start, end, strand)
            if strand == '-':
                for cds in cds_exon_list:
                    seq = fetch_seq(seq_fetch_obj, chrom, cds[0], cds[1], strand)
                    cds_seq += seq
                    cds_intron_seq += seq
                    if cds[0] == end:
                        cds_intron_seq += fetch_seq(seq_fetch_obj, chrom, start, end, strand)
            # a stop codon in the intron-free CDS means the reading frame is
            # not usable for this analysis
            if check_stop_codon(cds_seq):
                continue
            ## if intron is last intron, it would not cause NMD. We skip intron if it is the last intron of the transcript.
            if intron_id == last_intron:
                continue
            ptc_flag = False
            if check_stop_codon(cds_intron_seq):
                intron_seq = fetch_seq(seq_fetch_obj, chrom, start, end, strand)
                pos, stop_codon = get_stop_codon_pos(cds_intron_seq)
                # distance from the stop codon to the downstream junction
                intron_junc_dis = cds_intron_seq.index(intron_seq) + end - start - pos * 3 - 3
                if intron_junc_dis < 0:
                    continue
                if second_last_intron == intron_id:  ## 50nt PTC rule
                    if intron_junc_dis + exon_list[-2][1] - exon_list[-2][0] >= 50:
                        ptc_flag = True
                else:
                    ptc_flag = True
            if ptc_flag:
                fw.write('{}\t{}\t{}\n'.format(intron_id, transcript, 'True'))
            else:
                fw.write('{}\t{}\t{}\n'.format(intron_id, transcript, 'False'))
    fw.close()
def analyze_intron_cluster_ptc():
    """
    Summarizes PTC results per cell type and cluster.

    Reads Intron_PTC_results.txt, then for every cluster in each cell's
    clustering output writes the fraction of introns with and without a PTC
    to ptc_response_xmeans.txt.

    An intron counts as '+ PTC' when ANY of its host transcripts was
    flagged True.
    """
    dict_ptc_info = defaultdict(lambda:[])
    fp = open(CURRENT_DIR + 'Intron_PTC_results.txt')
    fp.readline()
    for line in fp:
        sp = line.strip().split('\t')
        dict_ptc_info[sp[0]].append((sp[1], sp[2]))
    fp.close()
    fw = open(CURRENT_DIR + 'ptc_response_xmeans.txt', 'w')
    fw.write('cell\tcategory\tproportion\ttype\n')
    for cell_name in ['ESC', 'NPC', 'Ctx']:
        fp = open(CURRENT_DIR + '../xmeans_cluster_U_intron_2/{}.cluster.polyA.txt'.format(cell_name))
        fp.readline()
        cluster = {}
        for line in fp:
            sp = line.strip().split('\t')
            cluster[sp[-1]] = sp[1].split(':')[0]
        fp.close()
        cluster_name_list = set(cluster.values())
        for cluster_name in sorted(cluster_name_list):
            ptc_count, no_ptc_count = 0, 0
            for intron in cluster:
                if cluster[intron] != cluster_name:
                    continue
                if intron not in dict_ptc_info:
                    continue
                ptc_flag = False
                for ptc_info in dict_ptc_info[intron]:
                    txID, ptc = ptc_info[0], ptc_info[1]
                    if ptc == 'True':
                        ptc_flag = True
                if ptc_flag:
                    ptc_count += 1
                else:
                    no_ptc_count += 1
            total = ptc_count + no_ptc_count
            # BUG FIX: a cluster with no PTC-annotated introns previously
            # caused a ZeroDivisionError; skip it instead
            if total == 0:
                continue
            fw.write('{}\t{}\t{}\t{}\n'.format(cell_name, cluster_name, ptc_count / (total + 0.0), '+ PTC'))
            fw.write('{}\t{}\t{}\t{}\n'.format(cell_name, cluster_name, no_ptc_count / (total + 0.0), '- PTC'))
    fw.close()
def main():
    """Runs the PTC annotation, then the per-cluster summary."""
    get_intron_PTC()
    analyze_intron_cluster_ptc()

# standard script entry guard
if __name__=="__main__":
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.